xref: /openbmc/qemu/linux-user/syscall.c (revision eabfeb0c)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #endif
118 #include "linux_loop.h"
119 #include "uname.h"
120 
121 #include "qemu.h"
122 #include "qemu/guest-random.h"
123 #include "qemu/selfmap.h"
124 #include "user/syscall-trace.h"
125 #include "qapi/error.h"
126 #include "fd-trans.h"
127 #include "tcg/tcg.h"
128 
129 #ifndef CLONE_IO
130 #define CLONE_IO                0x80000000      /* Clone io context */
131 #endif
132 
133 /* We can't directly call the host clone syscall, because this will
134  * badly confuse libc (breaking mutexes, for example). So we must
135  * divide clone flags into:
136  *  * flag combinations that look like pthread_create()
137  *  * flag combinations that look like fork()
138  *  * flags we can implement within QEMU itself
139  *  * flags we can't support and will return an error for
140  */
141 /* For thread creation, all these flags must be present; for
142  * fork, none must be present.
143  */
144 #define CLONE_THREAD_FLAGS                              \
145     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
146      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
147 
148 /* These flags are ignored:
149  * CLONE_DETACHED is now ignored by the kernel;
150  * CLONE_IO is just an optimisation hint to the I/O scheduler
151  */
152 #define CLONE_IGNORED_FLAGS                     \
153     (CLONE_DETACHED | CLONE_IO)
154 
155 /* Flags for fork which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_FORK_FLAGS               \
157     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
158      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
159 
160 /* Flags for thread creation which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
162     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
163      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
164 
165 #define CLONE_INVALID_FORK_FLAGS                                        \
166     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
167 
168 #define CLONE_INVALID_THREAD_FLAGS                                      \
169     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
170        CLONE_IGNORED_FLAGS))
171 
172 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
173  * have almost all been allocated. We cannot support any of
174  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
175  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
176  * The checks against the invalid thread masks above will catch these.
177  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
178  */
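/*
 * Illustrative sketch only (an assumption about how do_fork() consumes these
 * masks, not a quote of its code): requests that look like pthread_create()
 * are validated against the thread masks, everything else against the fork
 * masks, roughly:
 *
 *     if (flags & CLONE_VM) {
 *         if (flags & CLONE_INVALID_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *     } else if (flags & CLONE_INVALID_FORK_FLAGS) {
 *         return -TARGET_EINVAL;
 *     }
 */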
179 
180 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
181  * once. This exercises the codepaths for restart.
182  */
183 //#define DEBUG_ERESTARTSYS
184 
185 //#include <linux/msdos_fs.h>
186 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
187 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
188 
189 #undef _syscall0
190 #undef _syscall1
191 #undef _syscall2
192 #undef _syscall3
193 #undef _syscall4
194 #undef _syscall5
195 #undef _syscall6
196 
197 #define _syscall0(type,name)		\
198 static type name (void)			\
199 {					\
200 	return syscall(__NR_##name);	\
201 }
202 
203 #define _syscall1(type,name,type1,arg1)		\
204 static type name (type1 arg1)			\
205 {						\
206 	return syscall(__NR_##name, arg1);	\
207 }
208 
209 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
210 static type name (type1 arg1,type2 arg2)		\
211 {							\
212 	return syscall(__NR_##name, arg1, arg2);	\
213 }
214 
215 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
216 static type name (type1 arg1,type2 arg2,type3 arg3)		\
217 {								\
218 	return syscall(__NR_##name, arg1, arg2, arg3);		\
219 }
220 
221 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
223 {										\
224 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
225 }
226 
227 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
228 		  type5,arg5)							\
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
230 {										\
231 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
232 }
233 
234 
235 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5,type6,arg6)					\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
238                   type6 arg6)							\
239 {										\
240 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
241 }
242 
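/*
 * For reference, these macros expand to thin static wrappers around the raw
 * host syscall() function.  For example,
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * becomes
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * with the __NR_sys_* aliases below mapping the wrapper name onto the real
 * host syscall number.
 */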
243 
244 #define __NR_sys_uname __NR_uname
245 #define __NR_sys_getcwd1 __NR_getcwd
246 #define __NR_sys_getdents __NR_getdents
247 #define __NR_sys_getdents64 __NR_getdents64
248 #define __NR_sys_getpriority __NR_getpriority
249 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
250 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
251 #define __NR_sys_syslog __NR_syslog
252 #if defined(__NR_futex)
253 # define __NR_sys_futex __NR_futex
254 #endif
255 #if defined(__NR_futex_time64)
256 # define __NR_sys_futex_time64 __NR_futex_time64
257 #endif
258 #define __NR_sys_inotify_init __NR_inotify_init
259 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
260 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
261 #define __NR_sys_statx __NR_statx
262 
263 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
264 #define __NR__llseek __NR_lseek
265 #endif
266 
267 /* Newer kernel ports have llseek() instead of _llseek() */
268 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
269 #define TARGET_NR__llseek TARGET_NR_llseek
270 #endif
271 
272 #define __NR_sys_gettid __NR_gettid
273 _syscall0(int, sys_gettid)
274 
275 /* For the 64-bit guest on 32-bit host case we must emulate
276  * getdents using getdents64, because otherwise the host
277  * might hand us back more dirent records than we can fit
278  * into the guest buffer after structure format conversion.
279  * Otherwise we emulate getdents with getdents if the host has it.
280  */
281 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
282 #define EMULATE_GETDENTS_WITH_GETDENTS
283 #endif
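/*
 * Concrete example of the rule above: a 64-bit x86_64 host running a 32-bit
 * guest satisfies HOST_LONG_BITS >= TARGET_ABI_BITS and can pass the guest
 * getdents straight through to the host getdents, whereas a 32-bit host
 * running a 64-bit guest must emulate it via getdents64.
 */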
284 
285 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
286 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
287 #endif
288 #if (defined(TARGET_NR_getdents) && \
289       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
290     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
291 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
292 #endif
293 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
294 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
295           loff_t *, res, uint, wh);
296 #endif
297 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
298 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
299           siginfo_t *, uinfo)
300 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
301 #ifdef __NR_exit_group
302 _syscall1(int,exit_group,int,error_code)
303 #endif
304 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
305 _syscall1(int,set_tid_address,int *,tidptr)
306 #endif
307 #if defined(__NR_futex)
308 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
309           const struct timespec *,timeout,int *,uaddr2,int,val3)
310 #endif
311 #if defined(__NR_futex_time64)
312 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
313           const struct timespec *,timeout,int *,uaddr2,int,val3)
314 #endif
315 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
316 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
317           unsigned long *, user_mask_ptr);
318 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
319 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
320           unsigned long *, user_mask_ptr);
321 #define __NR_sys_getcpu __NR_getcpu
322 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
323 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
324           void *, arg);
325 _syscall2(int, capget, struct __user_cap_header_struct *, header,
326           struct __user_cap_data_struct *, data);
327 _syscall2(int, capset, struct __user_cap_header_struct *, header,
328           struct __user_cap_data_struct *, data);
329 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
330 _syscall2(int, ioprio_get, int, which, int, who)
331 #endif
332 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
333 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
334 #endif
335 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
336 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
337 #endif
338 
339 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
340 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
341           unsigned long, idx1, unsigned long, idx2)
342 #endif
343 
344 /*
345  * It is assumed that struct statx is architecture independent.
346  */
347 #if defined(TARGET_NR_statx) && defined(__NR_statx)
348 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
349           unsigned int, mask, struct target_statx *, statxbuf)
350 #endif
351 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
352 _syscall2(int, membarrier, int, cmd, int, flags)
353 #endif
354 
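/*
 * Note on the bitmask_transtbl convention (as used by the translation
 * helpers elsewhere in this file): each entry is { target_mask, target_bits,
 * host_mask, host_bits }; the target-to-host direction ORs in host_bits
 * whenever (flags & target_mask) == target_bits, and the reverse direction
 * works the same way with the host and target columns swapped.
 */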
355 static bitmask_transtbl fcntl_flags_tbl[] = {
356   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
357   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
358   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
359   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
360   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
361   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
362   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
363   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
364   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
365   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
366   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
367   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
368   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
369 #if defined(O_DIRECT)
370   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
371 #endif
372 #if defined(O_NOATIME)
373   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
374 #endif
375 #if defined(O_CLOEXEC)
376   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
377 #endif
378 #if defined(O_PATH)
379   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
380 #endif
381 #if defined(O_TMPFILE)
382   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
383 #endif
384   /* Don't terminate the list prematurely on 64-bit host+guest.  */
385 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
386   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
387 #endif
388   { 0, 0, 0, 0 }
389 };
390 
391 static int sys_getcwd1(char *buf, size_t size)
392 {
393   if (getcwd(buf, size) == NULL) {
394       /* getcwd() sets errno */
395       return (-1);
396   }
397   return strlen(buf)+1;
398 }
399 
400 #ifdef TARGET_NR_utimensat
401 #if defined(__NR_utimensat)
402 #define __NR_sys_utimensat __NR_utimensat
403 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
404           const struct timespec *,tsp,int,flags)
405 #else
406 static int sys_utimensat(int dirfd, const char *pathname,
407                          const struct timespec times[2], int flags)
408 {
409     errno = ENOSYS;
410     return -1;
411 }
412 #endif
413 #endif /* TARGET_NR_utimensat */
414 
415 #ifdef TARGET_NR_renameat2
416 #if defined(__NR_renameat2)
417 #define __NR_sys_renameat2 __NR_renameat2
418 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
419           const char *, new, unsigned int, flags)
420 #else
421 static int sys_renameat2(int oldfd, const char *old,
422                          int newfd, const char *new, int flags)
423 {
424     if (flags == 0) {
425         return renameat(oldfd, old, newfd, new);
426     }
427     errno = ENOSYS;
428     return -1;
429 }
430 #endif
431 #endif /* TARGET_NR_renameat2 */
432 
433 #ifdef CONFIG_INOTIFY
434 #include <sys/inotify.h>
435 
436 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
437 static int sys_inotify_init(void)
438 {
439   return (inotify_init());
440 }
441 #endif
442 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
443 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
444 {
445   return (inotify_add_watch(fd, pathname, mask));
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
449 static int sys_inotify_rm_watch(int fd, int32_t wd)
450 {
451   return (inotify_rm_watch(fd, wd));
452 }
453 #endif
454 #ifdef CONFIG_INOTIFY1
455 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
456 static int sys_inotify_init1(int flags)
457 {
458   return (inotify_init1(flags));
459 }
460 #endif
461 #endif
462 #else
463 /* Userspace can usually survive runtime without inotify */
464 #undef TARGET_NR_inotify_init
465 #undef TARGET_NR_inotify_init1
466 #undef TARGET_NR_inotify_add_watch
467 #undef TARGET_NR_inotify_rm_watch
468 #endif /* CONFIG_INOTIFY  */
469 
470 #if defined(TARGET_NR_prlimit64)
471 #ifndef __NR_prlimit64
472 # define __NR_prlimit64 -1
473 #endif
474 #define __NR_sys_prlimit64 __NR_prlimit64
475 /* The glibc rlimit structure may not match the one used by the underlying syscall */
476 struct host_rlimit64 {
477     uint64_t rlim_cur;
478     uint64_t rlim_max;
479 };
480 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
481           const struct host_rlimit64 *, new_limit,
482           struct host_rlimit64 *, old_limit)
483 #endif
484 
485 
486 #if defined(TARGET_NR_timer_create)
487 /* Maximum of 32 active POSIX timers allowed at any one time. */
488 static timer_t g_posix_timers[32] = { 0, } ;
489 
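/*
 * Descriptive note: next_free_host_timer() claims a slot by writing the
 * placeholder value (timer_t)1 into it, so the same slot is not handed out
 * twice; the placeholder is presumably replaced by the real host timer id
 * once the host timer_create() succeeds.
 */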
490 static inline int next_free_host_timer(void)
491 {
492     int k ;
493     /* FIXME: Does finding the next free slot require a lock? */
494     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
495         if (g_posix_timers[k] == 0) {
496             g_posix_timers[k] = (timer_t) 1;
497             return k;
498         }
499     }
500     return -1;
501 }
502 #endif
503 
504 /* ARM EABI and MIPS expect 64-bit types to be aligned even on pairs of registers */
505 #ifdef TARGET_ARM
506 static inline int regpairs_aligned(void *cpu_env, int num)
507 {
508     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
509 }
510 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
511 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
512 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
513 /* SysV ABI for PPC32 expects 64-bit parameters to be passed in odd/even pairs
514  * of registers which translates to the same as ARM/MIPS, because we start with
515  * r3 as arg1 */
516 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
517 #elif defined(TARGET_SH4)
518 /* SH4 doesn't align register pairs, except for p{read,write}64 */
519 static inline int regpairs_aligned(void *cpu_env, int num)
520 {
521     switch (num) {
522     case TARGET_NR_pread64:
523     case TARGET_NR_pwrite64:
524         return 1;
525 
526     default:
527         return 0;
528     }
529 }
530 #elif defined(TARGET_XTENSA)
531 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
532 #else
533 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
534 #endif
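/*
 * Worked example: on ARM EABI, pread64(fd, buf, count, offset) passes the
 * 64-bit offset in the aligned register pair r4/r5 and leaves r3 unused, so
 * the syscall dispatch code is expected to consult regpairs_aligned() and
 * skip that padding argument slot before reassembling the 64-bit value.
 */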
535 
536 #define ERRNO_TABLE_SIZE 1200
537 
538 /* target_to_host_errno_table[] is initialized from
539  * host_to_target_errno_table[] in syscall_init(). */
540 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
541 };
542 
543 /*
544  * This list is the union of errno values overridden in asm-<arch>/errno.h
545  * minus the errnos that are not actually generic to all archs.
546  */
547 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
548     [EAGAIN]		= TARGET_EAGAIN,
549     [EIDRM]		= TARGET_EIDRM,
550     [ECHRNG]		= TARGET_ECHRNG,
551     [EL2NSYNC]		= TARGET_EL2NSYNC,
552     [EL3HLT]		= TARGET_EL3HLT,
553     [EL3RST]		= TARGET_EL3RST,
554     [ELNRNG]		= TARGET_ELNRNG,
555     [EUNATCH]		= TARGET_EUNATCH,
556     [ENOCSI]		= TARGET_ENOCSI,
557     [EL2HLT]		= TARGET_EL2HLT,
558     [EDEADLK]		= TARGET_EDEADLK,
559     [ENOLCK]		= TARGET_ENOLCK,
560     [EBADE]		= TARGET_EBADE,
561     [EBADR]		= TARGET_EBADR,
562     [EXFULL]		= TARGET_EXFULL,
563     [ENOANO]		= TARGET_ENOANO,
564     [EBADRQC]		= TARGET_EBADRQC,
565     [EBADSLT]		= TARGET_EBADSLT,
566     [EBFONT]		= TARGET_EBFONT,
567     [ENOSTR]		= TARGET_ENOSTR,
568     [ENODATA]		= TARGET_ENODATA,
569     [ETIME]		= TARGET_ETIME,
570     [ENOSR]		= TARGET_ENOSR,
571     [ENONET]		= TARGET_ENONET,
572     [ENOPKG]		= TARGET_ENOPKG,
573     [EREMOTE]		= TARGET_EREMOTE,
574     [ENOLINK]		= TARGET_ENOLINK,
575     [EADV]		= TARGET_EADV,
576     [ESRMNT]		= TARGET_ESRMNT,
577     [ECOMM]		= TARGET_ECOMM,
578     [EPROTO]		= TARGET_EPROTO,
579     [EDOTDOT]		= TARGET_EDOTDOT,
580     [EMULTIHOP]		= TARGET_EMULTIHOP,
581     [EBADMSG]		= TARGET_EBADMSG,
582     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
583     [EOVERFLOW]		= TARGET_EOVERFLOW,
584     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
585     [EBADFD]		= TARGET_EBADFD,
586     [EREMCHG]		= TARGET_EREMCHG,
587     [ELIBACC]		= TARGET_ELIBACC,
588     [ELIBBAD]		= TARGET_ELIBBAD,
589     [ELIBSCN]		= TARGET_ELIBSCN,
590     [ELIBMAX]		= TARGET_ELIBMAX,
591     [ELIBEXEC]		= TARGET_ELIBEXEC,
592     [EILSEQ]		= TARGET_EILSEQ,
593     [ENOSYS]		= TARGET_ENOSYS,
594     [ELOOP]		= TARGET_ELOOP,
595     [ERESTART]		= TARGET_ERESTART,
596     [ESTRPIPE]		= TARGET_ESTRPIPE,
597     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
598     [EUSERS]		= TARGET_EUSERS,
599     [ENOTSOCK]		= TARGET_ENOTSOCK,
600     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
601     [EMSGSIZE]		= TARGET_EMSGSIZE,
602     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
603     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
604     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
605     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
606     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
607     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
608     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
609     [EADDRINUSE]	= TARGET_EADDRINUSE,
610     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
611     [ENETDOWN]		= TARGET_ENETDOWN,
612     [ENETUNREACH]	= TARGET_ENETUNREACH,
613     [ENETRESET]		= TARGET_ENETRESET,
614     [ECONNABORTED]	= TARGET_ECONNABORTED,
615     [ECONNRESET]	= TARGET_ECONNRESET,
616     [ENOBUFS]		= TARGET_ENOBUFS,
617     [EISCONN]		= TARGET_EISCONN,
618     [ENOTCONN]		= TARGET_ENOTCONN,
619     [EUCLEAN]		= TARGET_EUCLEAN,
620     [ENOTNAM]		= TARGET_ENOTNAM,
621     [ENAVAIL]		= TARGET_ENAVAIL,
622     [EISNAM]		= TARGET_EISNAM,
623     [EREMOTEIO]		= TARGET_EREMOTEIO,
624     [EDQUOT]            = TARGET_EDQUOT,
625     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
626     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
627     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
628     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
629     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
630     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
631     [EALREADY]		= TARGET_EALREADY,
632     [EINPROGRESS]	= TARGET_EINPROGRESS,
633     [ESTALE]		= TARGET_ESTALE,
634     [ECANCELED]		= TARGET_ECANCELED,
635     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
636     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
637 #ifdef ENOKEY
638     [ENOKEY]		= TARGET_ENOKEY,
639 #endif
640 #ifdef EKEYEXPIRED
641     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
642 #endif
643 #ifdef EKEYREVOKED
644     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
645 #endif
646 #ifdef EKEYREJECTED
647     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
648 #endif
649 #ifdef EOWNERDEAD
650     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
651 #endif
652 #ifdef ENOTRECOVERABLE
653     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
654 #endif
655 #ifdef ENOMSG
656     [ENOMSG]            = TARGET_ENOMSG,
657 #endif
658 #ifdef ERFKILL
659     [ERFKILL]           = TARGET_ERFKILL,
660 #endif
661 #ifdef EHWPOISON
662     [EHWPOISON]         = TARGET_EHWPOISON,
663 #endif
664 };
665 
666 static inline int host_to_target_errno(int err)
667 {
668     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
669         host_to_target_errno_table[err]) {
670         return host_to_target_errno_table[err];
671     }
672     return err;
673 }
674 
675 static inline int target_to_host_errno(int err)
676 {
677     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
678         target_to_host_errno_table[err]) {
679         return target_to_host_errno_table[err];
680     }
681     return err;
682 }
683 
684 static inline abi_long get_errno(abi_long ret)
685 {
686     if (ret == -1)
687         return -host_to_target_errno(errno);
688     else
689         return ret;
690 }
691 
692 const char *target_strerror(int err)
693 {
694     if (err == TARGET_ERESTARTSYS) {
695         return "To be restarted";
696     }
697     if (err == TARGET_QEMU_ESIGRETURN) {
698         return "Successful exit from sigreturn";
699     }
700 
701     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
702         return NULL;
703     }
704     return strerror(target_to_host_errno(err));
705 }
706 
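/*
 * Note on the safe_syscall*() wrappers defined below: they route blocking
 * syscalls through safe_syscall() (defined elsewhere in the linux-user
 * code), which cooperates with the signal handling so that a guest signal
 * arriving just before the host syscall would block is not lost: the call is
 * reported as needing a restart instead of blocking indefinitely.
 */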
707 #define safe_syscall0(type, name) \
708 static type safe_##name(void) \
709 { \
710     return safe_syscall(__NR_##name); \
711 }
712 
713 #define safe_syscall1(type, name, type1, arg1) \
714 static type safe_##name(type1 arg1) \
715 { \
716     return safe_syscall(__NR_##name, arg1); \
717 }
718 
719 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
720 static type safe_##name(type1 arg1, type2 arg2) \
721 { \
722     return safe_syscall(__NR_##name, arg1, arg2); \
723 }
724 
725 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
727 { \
728     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
729 }
730 
731 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
732     type4, arg4) \
733 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
734 { \
735     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
736 }
737 
738 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
739     type4, arg4, type5, arg5) \
740 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
741     type5 arg5) \
742 { \
743     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
744 }
745 
746 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
747     type4, arg4, type5, arg5, type6, arg6) \
748 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
749     type5 arg5, type6 arg6) \
750 { \
751     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
752 }
753 
754 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
755 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
756 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
757               int, flags, mode_t, mode)
758 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
759 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
760               struct rusage *, rusage)
761 #endif
762 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
763               int, options, struct rusage *, rusage)
764 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
765 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
766     defined(TARGET_NR_pselect6)
767 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
768               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
769 #endif
770 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
771 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
772               struct timespec *, tsp, const sigset_t *, sigmask,
773               size_t, sigsetsize)
774 #endif
775 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
776               int, maxevents, int, timeout, const sigset_t *, sigmask,
777               size_t, sigsetsize)
778 #if defined(__NR_futex)
779 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
780               const struct timespec *,timeout,int *,uaddr2,int,val3)
781 #endif
782 #if defined(__NR_futex_time64)
783 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
784               const struct timespec *,timeout,int *,uaddr2,int,val3)
785 #endif
786 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
787 safe_syscall2(int, kill, pid_t, pid, int, sig)
788 safe_syscall2(int, tkill, int, tid, int, sig)
789 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
790 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
791 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
792 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
793               unsigned long, pos_l, unsigned long, pos_h)
794 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
795               unsigned long, pos_l, unsigned long, pos_h)
796 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
797               socklen_t, addrlen)
798 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
799               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
800 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
801               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
802 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
803 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
804 safe_syscall2(int, flock, int, fd, int, operation)
805 #ifdef TARGET_NR_rt_sigtimedwait
806 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
807               const struct timespec *, uts, size_t, sigsetsize)
808 #endif
809 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
810               int, flags)
811 #if defined(TARGET_NR_nanosleep)
812 safe_syscall2(int, nanosleep, const struct timespec *, req,
813               struct timespec *, rem)
814 #endif
815 #ifdef TARGET_NR_clock_nanosleep
816 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
817               const struct timespec *, req, struct timespec *, rem)
818 #endif
819 #ifdef __NR_ipc
820 #ifdef __s390x__
821 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
822               void *, ptr)
823 #else
824 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
825               void *, ptr, long, fifth)
826 #endif
827 #endif
828 #ifdef __NR_msgsnd
829 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
830               int, flags)
831 #endif
832 #ifdef __NR_msgrcv
833 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
834               long, msgtype, int, flags)
835 #endif
836 #ifdef __NR_semtimedop
837 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
838               unsigned, nsops, const struct timespec *, timeout)
839 #endif
840 #ifdef TARGET_NR_mq_timedsend
841 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
842               size_t, len, unsigned, prio, const struct timespec *, timeout)
843 #endif
844 #ifdef TARGET_NR_mq_timedreceive
845 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
846               size_t, len, unsigned *, prio, const struct timespec *, timeout)
847 #endif
848 /* We do ioctl like this rather than via safe_syscall3 to preserve the
849  * "third argument might be integer or pointer or not present" behaviour of
850  * the libc function.
851  */
852 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
853 /* Similarly for fcntl. Note that callers must always:
854  *  - pass the F_GETLK64 etc. constants rather than the unsuffixed F_GETLK
855  *  - use the flock64 struct rather than the unsuffixed flock
856  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
857  */
858 #ifdef __NR_fcntl64
859 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
860 #else
861 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
862 #endif
863 
864 static inline int host_to_target_sock_type(int host_type)
865 {
866     int target_type;
867 
868     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
869     case SOCK_DGRAM:
870         target_type = TARGET_SOCK_DGRAM;
871         break;
872     case SOCK_STREAM:
873         target_type = TARGET_SOCK_STREAM;
874         break;
875     default:
876         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
877         break;
878     }
879 
880 #if defined(SOCK_CLOEXEC)
881     if (host_type & SOCK_CLOEXEC) {
882         target_type |= TARGET_SOCK_CLOEXEC;
883     }
884 #endif
885 
886 #if defined(SOCK_NONBLOCK)
887     if (host_type & SOCK_NONBLOCK) {
888         target_type |= TARGET_SOCK_NONBLOCK;
889     }
890 #endif
891 
892     return target_type;
893 }
894 
895 static abi_ulong target_brk;
896 static abi_ulong target_original_brk;
897 static abi_ulong brk_page;
898 
899 void target_set_brk(abi_ulong new_brk)
900 {
901     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
902     brk_page = HOST_PAGE_ALIGN(target_brk);
903 }
904 
905 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
906 #define DEBUGF_BRK(message, args...)
907 
908 /* do_brk() must return target values and target errnos. */
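/* Strategy, as implemented below: target_brk and brk_page track the guest
 * heap; a request within the already-reserved page just zeroes the newly
 * exposed range, while growing past brk_page mmap()s additional anonymous
 * memory directly after it and treats "mapped, but somewhere else" as an
 * out-of-memory failure.
 */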
909 abi_long do_brk(abi_ulong new_brk)
910 {
911     abi_long mapped_addr;
912     abi_ulong new_alloc_size;
913 
914     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
915 
916     if (!new_brk) {
917         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
918         return target_brk;
919     }
920     if (new_brk < target_original_brk) {
921         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
922                    target_brk);
923         return target_brk;
924     }
925 
926     /* If the new brk is less than the highest page reserved to the
927      * target heap allocation, set it and we're almost done...  */
928     if (new_brk <= brk_page) {
929         /* Heap contents are initialized to zero, as for anonymous
930          * mapped pages.  */
931         if (new_brk > target_brk) {
932             memset(g2h(target_brk), 0, new_brk - target_brk);
933         }
934 	target_brk = new_brk;
935         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
936 	return target_brk;
937     }
938 
939     /* We need to allocate more memory after the brk... Note that
940      * we don't use MAP_FIXED because that will map over the top of
941      * any existing mapping (like the one with the host libc or qemu
942      * itself); instead we treat "mapped but at wrong address" as
943      * a failure and unmap again.
944      */
945     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
946     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
947                                         PROT_READ|PROT_WRITE,
948                                         MAP_ANON|MAP_PRIVATE, 0, 0));
949 
950     if (mapped_addr == brk_page) {
951         /* Heap contents are initialized to zero, as for anonymous
952          * mapped pages.  Technically the new pages are already
953          * initialized to zero since they *are* anonymous mapped
954          * pages, however we have to take care with the contents that
955          * come from the remaining part of the previous page: it may
956  * contain garbage data due to a previous heap usage (grown
957  * then shrunk).  */
958         memset(g2h(target_brk), 0, brk_page - target_brk);
959 
960         target_brk = new_brk;
961         brk_page = HOST_PAGE_ALIGN(target_brk);
962         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
963             target_brk);
964         return target_brk;
965     } else if (mapped_addr != -1) {
966         /* Mapped but at wrong address, meaning there wasn't actually
967          * enough space for this brk.
968          */
969         target_munmap(mapped_addr, new_alloc_size);
970         mapped_addr = -1;
971         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
972     }
973     else {
974         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
975     }
976 
977 #if defined(TARGET_ALPHA)
978     /* We (partially) emulate OSF/1 on Alpha, which requires we
979        return a proper errno, not an unchanged brk value.  */
980     return -TARGET_ENOMEM;
981 #endif
982     /* For everything else, return the previous break. */
983     return target_brk;
984 }
985 
986 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
987     defined(TARGET_NR_pselect6)
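/* The guest fd_set is laid out as an array of abi_ulong bit words
 * (TARGET_ABI_BITS bits per word), which can differ from the host fd_set in
 * word size and byte order, so the helpers below convert the sets bit by bit
 * rather than copying them wholesale.
 */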
988 static inline abi_long copy_from_user_fdset(fd_set *fds,
989                                             abi_ulong target_fds_addr,
990                                             int n)
991 {
992     int i, nw, j, k;
993     abi_ulong b, *target_fds;
994 
995     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
996     if (!(target_fds = lock_user(VERIFY_READ,
997                                  target_fds_addr,
998                                  sizeof(abi_ulong) * nw,
999                                  1)))
1000         return -TARGET_EFAULT;
1001 
1002     FD_ZERO(fds);
1003     k = 0;
1004     for (i = 0; i < nw; i++) {
1005         /* grab the abi_ulong */
1006         __get_user(b, &target_fds[i]);
1007         for (j = 0; j < TARGET_ABI_BITS; j++) {
1008             /* check the bit inside the abi_ulong */
1009             if ((b >> j) & 1)
1010                 FD_SET(k, fds);
1011             k++;
1012         }
1013     }
1014 
1015     unlock_user(target_fds, target_fds_addr, 0);
1016 
1017     return 0;
1018 }
1019 
1020 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1021                                                  abi_ulong target_fds_addr,
1022                                                  int n)
1023 {
1024     if (target_fds_addr) {
1025         if (copy_from_user_fdset(fds, target_fds_addr, n))
1026             return -TARGET_EFAULT;
1027         *fds_ptr = fds;
1028     } else {
1029         *fds_ptr = NULL;
1030     }
1031     return 0;
1032 }
1033 
1034 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1035                                           const fd_set *fds,
1036                                           int n)
1037 {
1038     int i, nw, j, k;
1039     abi_long v;
1040     abi_ulong *target_fds;
1041 
1042     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1043     if (!(target_fds = lock_user(VERIFY_WRITE,
1044                                  target_fds_addr,
1045                                  sizeof(abi_ulong) * nw,
1046                                  0)))
1047         return -TARGET_EFAULT;
1048 
1049     k = 0;
1050     for (i = 0; i < nw; i++) {
1051         v = 0;
1052         for (j = 0; j < TARGET_ABI_BITS; j++) {
1053             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1054             k++;
1055         }
1056         __put_user(v, &target_fds[i]);
1057     }
1058 
1059     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1060 
1061     return 0;
1062 }
1063 #endif
1064 
1065 #if defined(__alpha__)
1066 #define HOST_HZ 1024
1067 #else
1068 #define HOST_HZ 100
1069 #endif
1070 
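/* Worked example: an Alpha host (HOST_HZ == 1024) reporting to a 100 Hz
 * guest converts 2048 host ticks into 2048 * 100 / 1024 = 200 guest ticks.
 */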
1071 static inline abi_long host_to_target_clock_t(long ticks)
1072 {
1073 #if HOST_HZ == TARGET_HZ
1074     return ticks;
1075 #else
1076     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1077 #endif
1078 }
1079 
1080 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1081                                              const struct rusage *rusage)
1082 {
1083     struct target_rusage *target_rusage;
1084 
1085     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1086         return -TARGET_EFAULT;
1087     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1088     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1089     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1090     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1091     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1092     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1093     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1094     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1095     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1096     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1097     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1098     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1099     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1100     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1101     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1102     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1103     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1104     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1105     unlock_user_struct(target_rusage, target_addr, 1);
1106 
1107     return 0;
1108 }
1109 
1110 #ifdef TARGET_NR_setrlimit
1111 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1112 {
1113     abi_ulong target_rlim_swap;
1114     rlim_t result;
1115 
1116     target_rlim_swap = tswapal(target_rlim);
1117     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1118         return RLIM_INFINITY;
1119 
1120     result = target_rlim_swap;
1121     if (target_rlim_swap != (rlim_t)result)
1122         return RLIM_INFINITY;
1123 
1124     return result;
1125 }
1126 #endif
1127 
1128 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1129 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1130 {
1131     abi_ulong target_rlim_swap;
1132     abi_ulong result;
1133 
1134     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1135         target_rlim_swap = TARGET_RLIM_INFINITY;
1136     else
1137         target_rlim_swap = rlim;
1138     result = tswapal(target_rlim_swap);
1139 
1140     return result;
1141 }
1142 #endif
1143 
1144 static inline int target_to_host_resource(int code)
1145 {
1146     switch (code) {
1147     case TARGET_RLIMIT_AS:
1148         return RLIMIT_AS;
1149     case TARGET_RLIMIT_CORE:
1150         return RLIMIT_CORE;
1151     case TARGET_RLIMIT_CPU:
1152         return RLIMIT_CPU;
1153     case TARGET_RLIMIT_DATA:
1154         return RLIMIT_DATA;
1155     case TARGET_RLIMIT_FSIZE:
1156         return RLIMIT_FSIZE;
1157     case TARGET_RLIMIT_LOCKS:
1158         return RLIMIT_LOCKS;
1159     case TARGET_RLIMIT_MEMLOCK:
1160         return RLIMIT_MEMLOCK;
1161     case TARGET_RLIMIT_MSGQUEUE:
1162         return RLIMIT_MSGQUEUE;
1163     case TARGET_RLIMIT_NICE:
1164         return RLIMIT_NICE;
1165     case TARGET_RLIMIT_NOFILE:
1166         return RLIMIT_NOFILE;
1167     case TARGET_RLIMIT_NPROC:
1168         return RLIMIT_NPROC;
1169     case TARGET_RLIMIT_RSS:
1170         return RLIMIT_RSS;
1171     case TARGET_RLIMIT_RTPRIO:
1172         return RLIMIT_RTPRIO;
1173     case TARGET_RLIMIT_SIGPENDING:
1174         return RLIMIT_SIGPENDING;
1175     case TARGET_RLIMIT_STACK:
1176         return RLIMIT_STACK;
1177     default:
1178         return code;
1179     }
1180 }
1181 
1182 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1183                                               abi_ulong target_tv_addr)
1184 {
1185     struct target_timeval *target_tv;
1186 
1187     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1188         return -TARGET_EFAULT;
1189     }
1190 
1191     __get_user(tv->tv_sec, &target_tv->tv_sec);
1192     __get_user(tv->tv_usec, &target_tv->tv_usec);
1193 
1194     unlock_user_struct(target_tv, target_tv_addr, 0);
1195 
1196     return 0;
1197 }
1198 
1199 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1200                                             const struct timeval *tv)
1201 {
1202     struct target_timeval *target_tv;
1203 
1204     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1205         return -TARGET_EFAULT;
1206     }
1207 
1208     __put_user(tv->tv_sec, &target_tv->tv_sec);
1209     __put_user(tv->tv_usec, &target_tv->tv_usec);
1210 
1211     unlock_user_struct(target_tv, target_tv_addr, 1);
1212 
1213     return 0;
1214 }
1215 
1216 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1217                                              const struct timeval *tv)
1218 {
1219     struct target__kernel_sock_timeval *target_tv;
1220 
1221     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1222         return -TARGET_EFAULT;
1223     }
1224 
1225     __put_user(tv->tv_sec, &target_tv->tv_sec);
1226     __put_user(tv->tv_usec, &target_tv->tv_usec);
1227 
1228     unlock_user_struct(target_tv, target_tv_addr, 1);
1229 
1230     return 0;
1231 }
1232 
1233 #if defined(TARGET_NR_futex) || \
1234     defined(TARGET_NR_rt_sigtimedwait) || \
1235     defined(TARGET_NR_pselect6) || \
1236     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1237     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1238     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1239     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop)
1240 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1241                                                abi_ulong target_addr)
1242 {
1243     struct target_timespec *target_ts;
1244 
1245     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1246         return -TARGET_EFAULT;
1247     }
1248     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1249     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1250     unlock_user_struct(target_ts, target_addr, 0);
1251     return 0;
1252 }
1253 #endif
1254 
1255 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
1256 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1257                                                  abi_ulong target_addr)
1258 {
1259     struct target__kernel_timespec *target_ts;
1260 
1261     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1262         return -TARGET_EFAULT;
1263     }
1264     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1265     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1266     unlock_user_struct(target_ts, target_addr, 0);
1267     return 0;
1268 }
1269 #endif
1270 
1271 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1272                                                struct timespec *host_ts)
1273 {
1274     struct target_timespec *target_ts;
1275 
1276     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1277         return -TARGET_EFAULT;
1278     }
1279     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1280     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1281     unlock_user_struct(target_ts, target_addr, 1);
1282     return 0;
1283 }
1284 
1285 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1286                                                  struct timespec *host_ts)
1287 {
1288     struct target__kernel_timespec *target_ts;
1289 
1290     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1291         return -TARGET_EFAULT;
1292     }
1293     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1294     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1295     unlock_user_struct(target_ts, target_addr, 1);
1296     return 0;
1297 }
1298 
1299 #if defined(TARGET_NR_gettimeofday)
1300 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1301                                              struct timezone *tz)
1302 {
1303     struct target_timezone *target_tz;
1304 
1305     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1306         return -TARGET_EFAULT;
1307     }
1308 
1309     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1310     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1311 
1312     unlock_user_struct(target_tz, target_tz_addr, 1);
1313 
1314     return 0;
1315 }
1316 #endif
1317 
1318 #if defined(TARGET_NR_settimeofday)
1319 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1320                                                abi_ulong target_tz_addr)
1321 {
1322     struct target_timezone *target_tz;
1323 
1324     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1325         return -TARGET_EFAULT;
1326     }
1327 
1328     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1329     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1330 
1331     unlock_user_struct(target_tz, target_tz_addr, 0);
1332 
1333     return 0;
1334 }
1335 #endif
1336 
1337 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1338 #include <mqueue.h>
1339 
1340 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1341                                               abi_ulong target_mq_attr_addr)
1342 {
1343     struct target_mq_attr *target_mq_attr;
1344 
1345     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1346                           target_mq_attr_addr, 1))
1347         return -TARGET_EFAULT;
1348 
1349     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1350     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1351     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1352     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1353 
1354     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1355 
1356     return 0;
1357 }
1358 
1359 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1360                                             const struct mq_attr *attr)
1361 {
1362     struct target_mq_attr *target_mq_attr;
1363 
1364     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1365                           target_mq_attr_addr, 0))
1366         return -TARGET_EFAULT;
1367 
1368     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1369     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1370     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1371     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1372 
1373     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1374 
1375     return 0;
1376 }
1377 #endif
1378 
1379 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1380 /* do_select() must return target values and target errnos. */
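/* The guest select() is emulated on top of the host pselect6: the timeval
 * timeout is converted to a timespec for the call, and on success the
 * remaining time is converted back and written out, matching Linux select()
 * semantics of updating the timeout.
 */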
1381 static abi_long do_select(int n,
1382                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1383                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1384 {
1385     fd_set rfds, wfds, efds;
1386     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1387     struct timeval tv;
1388     struct timespec ts, *ts_ptr;
1389     abi_long ret;
1390 
1391     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1392     if (ret) {
1393         return ret;
1394     }
1395     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1396     if (ret) {
1397         return ret;
1398     }
1399     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1400     if (ret) {
1401         return ret;
1402     }
1403 
1404     if (target_tv_addr) {
1405         if (copy_from_user_timeval(&tv, target_tv_addr))
1406             return -TARGET_EFAULT;
1407         ts.tv_sec = tv.tv_sec;
1408         ts.tv_nsec = tv.tv_usec * 1000;
1409         ts_ptr = &ts;
1410     } else {
1411         ts_ptr = NULL;
1412     }
1413 
1414     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1415                                   ts_ptr, NULL));
1416 
1417     if (!is_error(ret)) {
1418         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1419             return -TARGET_EFAULT;
1420         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1421             return -TARGET_EFAULT;
1422         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1423             return -TARGET_EFAULT;
1424 
1425         if (target_tv_addr) {
1426             tv.tv_sec = ts.tv_sec;
1427             tv.tv_usec = ts.tv_nsec / 1000;
1428             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1429                 return -TARGET_EFAULT;
1430             }
1431         }
1432     }
1433 
1434     return ret;
1435 }
1436 
1437 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1438 static abi_long do_old_select(abi_ulong arg1)
1439 {
1440     struct target_sel_arg_struct *sel;
1441     abi_ulong inp, outp, exp, tvp;
1442     long nsel;
1443 
1444     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1445         return -TARGET_EFAULT;
1446     }
1447 
1448     nsel = tswapal(sel->n);
1449     inp = tswapal(sel->inp);
1450     outp = tswapal(sel->outp);
1451     exp = tswapal(sel->exp);
1452     tvp = tswapal(sel->tvp);
1453 
1454     unlock_user_struct(sel, arg1, 0);
1455 
1456     return do_select(nsel, inp, outp, exp, tvp);
1457 }
1458 #endif
1459 #endif
1460 
1461 static abi_long do_pipe2(int host_pipe[], int flags)
1462 {
1463 #ifdef CONFIG_PIPE2
1464     return pipe2(host_pipe, flags);
1465 #else
1466     return -ENOSYS;
1467 #endif
1468 }
1469 
1470 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1471                         int flags, int is_pipe2)
1472 {
1473     int host_pipe[2];
1474     abi_long ret;
1475     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1476 
1477     if (is_error(ret))
1478         return get_errno(ret);
1479 
1480     /* Several targets have special calling conventions for the original
1481        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1482     if (!is_pipe2) {
1483 #if defined(TARGET_ALPHA)
1484         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1485         return host_pipe[0];
1486 #elif defined(TARGET_MIPS)
1487         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1488         return host_pipe[0];
1489 #elif defined(TARGET_SH4)
1490         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1491         return host_pipe[0];
1492 #elif defined(TARGET_SPARC)
1493         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1494         return host_pipe[0];
1495 #endif
1496     }
1497 
1498     if (put_user_s32(host_pipe[0], pipedes)
1499         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1500         return -TARGET_EFAULT;
1501     return get_errno(ret);
1502 }
1503 
1504 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1505                                               abi_ulong target_addr,
1506                                               socklen_t len)
1507 {
1508     struct target_ip_mreqn *target_smreqn;
1509 
1510     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1511     if (!target_smreqn)
1512         return -TARGET_EFAULT;
1513     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1514     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1515     if (len == sizeof(struct target_ip_mreqn))
1516         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1517     unlock_user(target_smreqn, target_addr, 0);
1518 
1519     return 0;
1520 }
1521 
1522 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1523                                                abi_ulong target_addr,
1524                                                socklen_t len)
1525 {
1526     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1527     sa_family_t sa_family;
1528     struct target_sockaddr *target_saddr;
1529 
1530     if (fd_trans_target_to_host_addr(fd)) {
1531         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1532     }
1533 
1534     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1535     if (!target_saddr)
1536         return -TARGET_EFAULT;
1537 
1538     sa_family = tswap16(target_saddr->sa_family);
1539 
1540     /* Oops. The caller might send an incomplete sun_path; sun_path
1541      * must be terminated by \0 (see the manual page), but
1542      * unfortunately it is quite common to specify sockaddr_un
1543      * length as "strlen(x->sun_path)" while it should be
1544      * "strlen(...) + 1". We'll fix that here if needed.
1545      * Linux kernel has a similar feature.
1546      */
1547 
1548     if (sa_family == AF_UNIX) {
1549         if (len < unix_maxlen && len > 0) {
1550             char *cp = (char *)target_saddr;
1551 
1552             if (cp[len - 1] && !cp[len])
1553                 len++;
1554         }
1555         if (len > unix_maxlen)
1556             len = unix_maxlen;
1557     }
1558 
1559     memcpy(addr, target_saddr, len);
1560     addr->sa_family = sa_family;
1561     if (sa_family == AF_NETLINK) {
1562         struct sockaddr_nl *nladdr;
1563 
1564         nladdr = (struct sockaddr_nl *)addr;
1565         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1566         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1567     } else if (sa_family == AF_PACKET) {
1568         struct target_sockaddr_ll *lladdr;
1569 
1570         lladdr = (struct target_sockaddr_ll *)addr;
1571         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1572         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1573     }
1574     unlock_user(target_saddr, target_addr, 0);
1575 
1576     return 0;
1577 }
1578 
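/* Copy a host sockaddr of 'len' bytes back into guest memory, converting
 * the address family and the AF_NETLINK, AF_PACKET and AF_INET6 fields
 * that need byte-swapping. A zero length is a no-op.
 */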
1579 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1580                                                struct sockaddr *addr,
1581                                                socklen_t len)
1582 {
1583     struct target_sockaddr *target_saddr;
1584 
1585     if (len == 0) {
1586         return 0;
1587     }
1588     assert(addr);
1589 
1590     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1591     if (!target_saddr)
1592         return -TARGET_EFAULT;
1593     memcpy(target_saddr, addr, len);
1594     if (len >= offsetof(struct target_sockaddr, sa_family) +
1595         sizeof(target_saddr->sa_family)) {
1596         target_saddr->sa_family = tswap16(addr->sa_family);
1597     }
1598     if (addr->sa_family == AF_NETLINK &&
1599         len >= sizeof(struct target_sockaddr_nl)) {
1600         struct target_sockaddr_nl *target_nl =
1601                (struct target_sockaddr_nl *)target_saddr;
1602         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1603         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1604     } else if (addr->sa_family == AF_PACKET) {
1605         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1606         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1607         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1608     } else if (addr->sa_family == AF_INET6 &&
1609                len >= sizeof(struct target_sockaddr_in6)) {
1610         struct target_sockaddr_in6 *target_in6 =
1611                (struct target_sockaddr_in6 *)target_saddr;
1612         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1613     }
1614     unlock_user(target_saddr, target_addr, len);
1615 
1616     return 0;
1617 }
1618 
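/* Convert the ancillary data (control messages) attached to a guest msghdr
 * into host format. SCM_RIGHTS file descriptors and SCM_CREDENTIALS are
 * converted field by field; other payload types are copied verbatim and
 * logged as unimplemented.
 */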
1619 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1620                                            struct target_msghdr *target_msgh)
1621 {
1622     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1623     abi_long msg_controllen;
1624     abi_ulong target_cmsg_addr;
1625     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1626     socklen_t space = 0;
1627 
1628     msg_controllen = tswapal(target_msgh->msg_controllen);
1629     if (msg_controllen < sizeof (struct target_cmsghdr))
1630         goto the_end;
1631     target_cmsg_addr = tswapal(target_msgh->msg_control);
1632     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1633     target_cmsg_start = target_cmsg;
1634     if (!target_cmsg)
1635         return -TARGET_EFAULT;
1636 
1637     while (cmsg && target_cmsg) {
1638         void *data = CMSG_DATA(cmsg);
1639         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1640 
1641         int len = tswapal(target_cmsg->cmsg_len)
1642             - sizeof(struct target_cmsghdr);
1643 
1644         space += CMSG_SPACE(len);
1645         if (space > msgh->msg_controllen) {
1646             space -= CMSG_SPACE(len);
1647             /* This is a QEMU bug, since we allocated the payload
1648              * area ourselves (unlike overflow in host-to-target
1649              * conversion, which is just the guest giving us a buffer
1650              * that's too small). It can't happen for the payload types
1651              * we currently support; if it becomes an issue in future
1652              * we would need to improve our allocation strategy to
1653              * something more intelligent than "twice the size of the
1654              * target buffer we're reading from".
1655              */
1656             qemu_log_mask(LOG_UNIMP,
1657                           ("Unsupported ancillary data %d/%d: "
1658                            "unhandled msg size\n"),
1659                           tswap32(target_cmsg->cmsg_level),
1660                           tswap32(target_cmsg->cmsg_type));
1661             break;
1662         }
1663 
1664         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1665             cmsg->cmsg_level = SOL_SOCKET;
1666         } else {
1667             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1668         }
1669         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1670         cmsg->cmsg_len = CMSG_LEN(len);
1671 
1672         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1673             int *fd = (int *)data;
1674             int *target_fd = (int *)target_data;
1675             int i, numfds = len / sizeof(int);
1676 
1677             for (i = 0; i < numfds; i++) {
1678                 __get_user(fd[i], target_fd + i);
1679             }
1680         } else if (cmsg->cmsg_level == SOL_SOCKET
1681                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1682             struct ucred *cred = (struct ucred *)data;
1683             struct target_ucred *target_cred =
1684                 (struct target_ucred *)target_data;
1685 
1686             __get_user(cred->pid, &target_cred->pid);
1687             __get_user(cred->uid, &target_cred->uid);
1688             __get_user(cred->gid, &target_cred->gid);
1689         } else {
1690             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1691                           cmsg->cmsg_level, cmsg->cmsg_type);
1692             memcpy(data, target_data, len);
1693         }
1694 
1695         cmsg = CMSG_NXTHDR(msgh, cmsg);
1696         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1697                                          target_cmsg_start);
1698     }
1699     unlock_user(target_cmsg, target_cmsg_addr, 0);
1700  the_end:
1701     msgh->msg_controllen = space;
1702     return 0;
1703 }
1704 
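/* Convert host ancillary data back into the guest msghdr. Payloads that do
 * not fit in the guest control buffer are truncated and reported via
 * MSG_CTRUNC; the payload types we understand (SCM_RIGHTS, SO_TIMESTAMP,
 * SCM_CREDENTIALS, IP/IPV6 TTL and RECVERR data) are converted, anything
 * else is copied as-is.
 */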
1705 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1706                                            struct msghdr *msgh)
1707 {
1708     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1709     abi_long msg_controllen;
1710     abi_ulong target_cmsg_addr;
1711     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1712     socklen_t space = 0;
1713 
1714     msg_controllen = tswapal(target_msgh->msg_controllen);
1715     if (msg_controllen < sizeof (struct target_cmsghdr))
1716         goto the_end;
1717     target_cmsg_addr = tswapal(target_msgh->msg_control);
1718     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1719     target_cmsg_start = target_cmsg;
1720     if (!target_cmsg)
1721         return -TARGET_EFAULT;
1722 
1723     while (cmsg && target_cmsg) {
1724         void *data = CMSG_DATA(cmsg);
1725         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1726 
1727         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1728         int tgt_len, tgt_space;
1729 
1730         /* We never copy a half-header but may copy half-data;
1731          * this is Linux's behaviour in put_cmsg(). Note that
1732          * truncation here is a guest problem (which we report
1733          * to the guest via the CTRUNC bit), unlike truncation
1734          * in target_to_host_cmsg, which is a QEMU bug.
1735          */
1736         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1737             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1738             break;
1739         }
1740 
1741         if (cmsg->cmsg_level == SOL_SOCKET) {
1742             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1743         } else {
1744             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1745         }
1746         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1747 
1748         /* Payload types which need a different size of payload on
1749          * the target must adjust tgt_len here.
1750          */
1751         tgt_len = len;
1752         switch (cmsg->cmsg_level) {
1753         case SOL_SOCKET:
1754             switch (cmsg->cmsg_type) {
1755             case SO_TIMESTAMP:
1756                 tgt_len = sizeof(struct target_timeval);
1757                 break;
1758             default:
1759                 break;
1760             }
1761             break;
1762         default:
1763             break;
1764         }
1765 
1766         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1767             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1768             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1769         }
1770 
1771         /* We must now copy-and-convert len bytes of payload
1772          * into tgt_len bytes of destination space. Bear in mind
1773          * that in both source and destination we may be dealing
1774          * with a truncated value!
1775          */
1776         switch (cmsg->cmsg_level) {
1777         case SOL_SOCKET:
1778             switch (cmsg->cmsg_type) {
1779             case SCM_RIGHTS:
1780             {
1781                 int *fd = (int *)data;
1782                 int *target_fd = (int *)target_data;
1783                 int i, numfds = tgt_len / sizeof(int);
1784 
1785                 for (i = 0; i < numfds; i++) {
1786                     __put_user(fd[i], target_fd + i);
1787                 }
1788                 break;
1789             }
1790             case SO_TIMESTAMP:
1791             {
1792                 struct timeval *tv = (struct timeval *)data;
1793                 struct target_timeval *target_tv =
1794                     (struct target_timeval *)target_data;
1795 
1796                 if (len != sizeof(struct timeval) ||
1797                     tgt_len != sizeof(struct target_timeval)) {
1798                     goto unimplemented;
1799                 }
1800 
1801                 /* copy struct timeval to target */
1802                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1803                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1804                 break;
1805             }
1806             case SCM_CREDENTIALS:
1807             {
1808                 struct ucred *cred = (struct ucred *)data;
1809                 struct target_ucred *target_cred =
1810                     (struct target_ucred *)target_data;
1811 
1812                 __put_user(cred->pid, &target_cred->pid);
1813                 __put_user(cred->uid, &target_cred->uid);
1814                 __put_user(cred->gid, &target_cred->gid);
1815                 break;
1816             }
1817             default:
1818                 goto unimplemented;
1819             }
1820             break;
1821 
1822         case SOL_IP:
1823             switch (cmsg->cmsg_type) {
1824             case IP_TTL:
1825             {
1826                 uint32_t *v = (uint32_t *)data;
1827                 uint32_t *t_int = (uint32_t *)target_data;
1828 
1829                 if (len != sizeof(uint32_t) ||
1830                     tgt_len != sizeof(uint32_t)) {
1831                     goto unimplemented;
1832                 }
1833                 __put_user(*v, t_int);
1834                 break;
1835             }
1836             case IP_RECVERR:
1837             {
1838                 struct errhdr_t {
1839                    struct sock_extended_err ee;
1840                    struct sockaddr_in offender;
1841                 };
1842                 struct errhdr_t *errh = (struct errhdr_t *)data;
1843                 struct errhdr_t *target_errh =
1844                     (struct errhdr_t *)target_data;
1845 
1846                 if (len != sizeof(struct errhdr_t) ||
1847                     tgt_len != sizeof(struct errhdr_t)) {
1848                     goto unimplemented;
1849                 }
1850                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1851                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1852                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1853                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1854                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1855                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1856                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1857                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1858                     (void *) &errh->offender, sizeof(errh->offender));
1859                 break;
1860             }
1861             default:
1862                 goto unimplemented;
1863             }
1864             break;
1865 
1866         case SOL_IPV6:
1867             switch (cmsg->cmsg_type) {
1868             case IPV6_HOPLIMIT:
1869             {
1870                 uint32_t *v = (uint32_t *)data;
1871                 uint32_t *t_int = (uint32_t *)target_data;
1872 
1873                 if (len != sizeof(uint32_t) ||
1874                     tgt_len != sizeof(uint32_t)) {
1875                     goto unimplemented;
1876                 }
1877                 __put_user(*v, t_int);
1878                 break;
1879             }
1880             case IPV6_RECVERR:
1881             {
1882                 struct errhdr6_t {
1883                    struct sock_extended_err ee;
1884                    struct sockaddr_in6 offender;
1885                 };
1886                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1887                 struct errhdr6_t *target_errh =
1888                     (struct errhdr6_t *)target_data;
1889 
1890                 if (len != sizeof(struct errhdr6_t) ||
1891                     tgt_len != sizeof(struct errhdr6_t)) {
1892                     goto unimplemented;
1893                 }
1894                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1895                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1896                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1897                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1898                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1899                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1900                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1901                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1902                     (void *) &errh->offender, sizeof(errh->offender));
1903                 break;
1904             }
1905             default:
1906                 goto unimplemented;
1907             }
1908             break;
1909 
1910         default:
1911         unimplemented:
1912             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1913                           cmsg->cmsg_level, cmsg->cmsg_type);
1914             memcpy(target_data, data, MIN(len, tgt_len));
1915             if (tgt_len > len) {
1916                 memset(target_data + len, 0, tgt_len - len);
1917             }
1918         }
1919 
1920         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1921         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1922         if (msg_controllen < tgt_space) {
1923             tgt_space = msg_controllen;
1924         }
1925         msg_controllen -= tgt_space;
1926         space += tgt_space;
1927         cmsg = CMSG_NXTHDR(msgh, cmsg);
1928         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1929                                          target_cmsg_start);
1930     }
1931     unlock_user(target_cmsg, target_cmsg_addr, space);
1932  the_end:
1933     target_msgh->msg_controllen = tswapal(space);
1934     return 0;
1935 }
1936 
1937 /* do_setsockopt() must return target values and target errnos. */
1938 static abi_long do_setsockopt(int sockfd, int level, int optname,
1939                               abi_ulong optval_addr, socklen_t optlen)
1940 {
1941     abi_long ret;
1942     int val;
1943     struct ip_mreqn *ip_mreq;
1944     struct ip_mreq_source *ip_mreq_source;
1945 
1946     switch(level) {
1947     case SOL_TCP:
1948         /* TCP options all take an 'int' value.  */
1949         if (optlen < sizeof(uint32_t))
1950             return -TARGET_EINVAL;
1951 
1952         if (get_user_u32(val, optval_addr))
1953             return -TARGET_EFAULT;
1954         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1955         break;
1956     case SOL_IP:
1957         switch(optname) {
1958         case IP_TOS:
1959         case IP_TTL:
1960         case IP_HDRINCL:
1961         case IP_ROUTER_ALERT:
1962         case IP_RECVOPTS:
1963         case IP_RETOPTS:
1964         case IP_PKTINFO:
1965         case IP_MTU_DISCOVER:
1966         case IP_RECVERR:
1967         case IP_RECVTTL:
1968         case IP_RECVTOS:
1969 #ifdef IP_FREEBIND
1970         case IP_FREEBIND:
1971 #endif
1972         case IP_MULTICAST_TTL:
1973         case IP_MULTICAST_LOOP:
1974             val = 0;
1975             if (optlen >= sizeof(uint32_t)) {
1976                 if (get_user_u32(val, optval_addr))
1977                     return -TARGET_EFAULT;
1978             } else if (optlen >= 1) {
1979                 if (get_user_u8(val, optval_addr))
1980                     return -TARGET_EFAULT;
1981             }
1982             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1983             break;
1984         case IP_ADD_MEMBERSHIP:
1985         case IP_DROP_MEMBERSHIP:
1986             if (optlen < sizeof (struct target_ip_mreq) ||
1987                 optlen > sizeof (struct target_ip_mreqn))
1988                 return -TARGET_EINVAL;
1989 
1990             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1991             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1992             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1993             break;
1994 
1995         case IP_BLOCK_SOURCE:
1996         case IP_UNBLOCK_SOURCE:
1997         case IP_ADD_SOURCE_MEMBERSHIP:
1998         case IP_DROP_SOURCE_MEMBERSHIP:
1999             if (optlen != sizeof (struct target_ip_mreq_source))
2000                 return -TARGET_EINVAL;
2001 
2002             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2003             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2004             unlock_user(ip_mreq_source, optval_addr, 0);
2005             break;
2006 
2007         default:
2008             goto unimplemented;
2009         }
2010         break;
2011     case SOL_IPV6:
2012         switch (optname) {
2013         case IPV6_MTU_DISCOVER:
2014         case IPV6_MTU:
2015         case IPV6_V6ONLY:
2016         case IPV6_RECVPKTINFO:
2017         case IPV6_UNICAST_HOPS:
2018         case IPV6_MULTICAST_HOPS:
2019         case IPV6_MULTICAST_LOOP:
2020         case IPV6_RECVERR:
2021         case IPV6_RECVHOPLIMIT:
2022         case IPV6_2292HOPLIMIT:
2023         case IPV6_CHECKSUM:
2024         case IPV6_ADDRFORM:
2025         case IPV6_2292PKTINFO:
2026         case IPV6_RECVTCLASS:
2027         case IPV6_RECVRTHDR:
2028         case IPV6_2292RTHDR:
2029         case IPV6_RECVHOPOPTS:
2030         case IPV6_2292HOPOPTS:
2031         case IPV6_RECVDSTOPTS:
2032         case IPV6_2292DSTOPTS:
2033         case IPV6_TCLASS:
2034 #ifdef IPV6_RECVPATHMTU
2035         case IPV6_RECVPATHMTU:
2036 #endif
2037 #ifdef IPV6_TRANSPARENT
2038         case IPV6_TRANSPARENT:
2039 #endif
2040 #ifdef IPV6_FREEBIND
2041         case IPV6_FREEBIND:
2042 #endif
2043 #ifdef IPV6_RECVORIGDSTADDR
2044         case IPV6_RECVORIGDSTADDR:
2045 #endif
2046             val = 0;
2047             if (optlen < sizeof(uint32_t)) {
2048                 return -TARGET_EINVAL;
2049             }
2050             if (get_user_u32(val, optval_addr)) {
2051                 return -TARGET_EFAULT;
2052             }
2053             ret = get_errno(setsockopt(sockfd, level, optname,
2054                                        &val, sizeof(val)));
2055             break;
2056         case IPV6_PKTINFO:
2057         {
2058             struct in6_pktinfo pki;
2059 
2060             if (optlen < sizeof(pki)) {
2061                 return -TARGET_EINVAL;
2062             }
2063 
2064             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2065                 return -TARGET_EFAULT;
2066             }
2067 
2068             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2069 
2070             ret = get_errno(setsockopt(sockfd, level, optname,
2071                                        &pki, sizeof(pki)));
2072             break;
2073         }
2074         case IPV6_ADD_MEMBERSHIP:
2075         case IPV6_DROP_MEMBERSHIP:
2076         {
2077             struct ipv6_mreq ipv6mreq;
2078 
2079             if (optlen < sizeof(ipv6mreq)) {
2080                 return -TARGET_EINVAL;
2081             }
2082 
2083             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2084                 return -TARGET_EFAULT;
2085             }
2086 
2087             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2088 
2089             ret = get_errno(setsockopt(sockfd, level, optname,
2090                                        &ipv6mreq, sizeof(ipv6mreq)));
2091             break;
2092         }
2093         default:
2094             goto unimplemented;
2095         }
2096         break;
2097     case SOL_ICMPV6:
2098         switch (optname) {
2099         case ICMPV6_FILTER:
2100         {
2101             struct icmp6_filter icmp6f;
2102 
2103             if (optlen > sizeof(icmp6f)) {
2104                 optlen = sizeof(icmp6f);
2105             }
2106 
2107             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2108                 return -TARGET_EFAULT;
2109             }
2110 
2111             for (val = 0; val < 8; val++) {
2112                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2113             }
2114 
2115             ret = get_errno(setsockopt(sockfd, level, optname,
2116                                        &icmp6f, optlen));
2117             break;
2118         }
2119         default:
2120             goto unimplemented;
2121         }
2122         break;
2123     case SOL_RAW:
2124         switch (optname) {
2125         case ICMP_FILTER:
2126         case IPV6_CHECKSUM:
2127             /* These options take a u32 value. */
2128             if (optlen < sizeof(uint32_t)) {
2129                 return -TARGET_EINVAL;
2130             }
2131 
2132             if (get_user_u32(val, optval_addr)) {
2133                 return -TARGET_EFAULT;
2134             }
2135             ret = get_errno(setsockopt(sockfd, level, optname,
2136                                        &val, sizeof(val)));
2137             break;
2138 
2139         default:
2140             goto unimplemented;
2141         }
2142         break;
2143 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2144     case SOL_ALG:
2145         switch (optname) {
2146         case ALG_SET_KEY:
2147         {
2148             char *alg_key = g_try_malloc(optlen);
2149 
2150             if (!alg_key) {
2151                 return -TARGET_ENOMEM;
2152             }
2153             if (copy_from_user(alg_key, optval_addr, optlen)) {
2154                 g_free(alg_key);
2155                 return -TARGET_EFAULT;
2156             }
2157             ret = get_errno(setsockopt(sockfd, level, optname,
2158                                        alg_key, optlen));
2159             g_free(alg_key);
2160             break;
2161         }
2162         case ALG_SET_AEAD_AUTHSIZE:
2163         {
2164             ret = get_errno(setsockopt(sockfd, level, optname,
2165                                        NULL, optlen));
2166             break;
2167         }
2168         default:
2169             goto unimplemented;
2170         }
2171         break;
2172 #endif
2173     case TARGET_SOL_SOCKET:
2174         switch (optname) {
2175         case TARGET_SO_RCVTIMEO:
2176         {
2177                 struct timeval tv;
2178 
2179                 optname = SO_RCVTIMEO;
2180 
2181 set_timeout:
2182                 if (optlen != sizeof(struct target_timeval)) {
2183                     return -TARGET_EINVAL;
2184                 }
2185 
2186                 if (copy_from_user_timeval(&tv, optval_addr)) {
2187                     return -TARGET_EFAULT;
2188                 }
2189 
2190                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2191                                 &tv, sizeof(tv)));
2192                 return ret;
2193         }
2194         case TARGET_SO_SNDTIMEO:
2195                 optname = SO_SNDTIMEO;
2196                 goto set_timeout;
2197         case TARGET_SO_ATTACH_FILTER:
2198         {
2199                 struct target_sock_fprog *tfprog;
2200                 struct target_sock_filter *tfilter;
2201                 struct sock_fprog fprog;
2202                 struct sock_filter *filter;
2203                 int i;
2204 
2205                 if (optlen != sizeof(*tfprog)) {
2206                     return -TARGET_EINVAL;
2207                 }
2208                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2209                     return -TARGET_EFAULT;
2210                 }
2211                 if (!lock_user_struct(VERIFY_READ, tfilter,
2212                                       tswapal(tfprog->filter), 0)) {
2213                     unlock_user_struct(tfprog, optval_addr, 1);
2214                     return -TARGET_EFAULT;
2215                 }
2216 
2217                 fprog.len = tswap16(tfprog->len);
2218                 filter = g_try_new(struct sock_filter, fprog.len);
2219                 if (filter == NULL) {
2220                     unlock_user_struct(tfilter, tfprog->filter, 1);
2221                     unlock_user_struct(tfprog, optval_addr, 1);
2222                     return -TARGET_ENOMEM;
2223                 }
2224                 for (i = 0; i < fprog.len; i++) {
2225                     filter[i].code = tswap16(tfilter[i].code);
2226                     filter[i].jt = tfilter[i].jt;
2227                     filter[i].jf = tfilter[i].jf;
2228                     filter[i].k = tswap32(tfilter[i].k);
2229                 }
2230                 fprog.filter = filter;
2231 
2232                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2233                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2234                 g_free(filter);
2235 
2236                 unlock_user_struct(tfilter, tfprog->filter, 1);
2237                 unlock_user_struct(tfprog, optval_addr, 1);
2238                 return ret;
2239         }
2240         case TARGET_SO_BINDTODEVICE:
2241         {
2242                 char *dev_ifname, *addr_ifname;
2243 
2244                 if (optlen > IFNAMSIZ - 1) {
2245                     optlen = IFNAMSIZ - 1;
2246                 }
2247                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2248                 if (!dev_ifname) {
2249                     return -TARGET_EFAULT;
2250                 }
2251                 optname = SO_BINDTODEVICE;
2252                 addr_ifname = alloca(IFNAMSIZ);
2253                 memcpy(addr_ifname, dev_ifname, optlen);
2254                 addr_ifname[optlen] = 0;
2255                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2256                                            addr_ifname, optlen));
2257                 unlock_user(dev_ifname, optval_addr, 0);
2258                 return ret;
2259         }
2260         case TARGET_SO_LINGER:
2261         {
2262                 struct linger lg;
2263                 struct target_linger *tlg;
2264 
2265                 if (optlen != sizeof(struct target_linger)) {
2266                     return -TARGET_EINVAL;
2267                 }
2268                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2269                     return -TARGET_EFAULT;
2270                 }
2271                 __get_user(lg.l_onoff, &tlg->l_onoff);
2272                 __get_user(lg.l_linger, &tlg->l_linger);
2273                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2274                                 &lg, sizeof(lg)));
2275                 unlock_user_struct(tlg, optval_addr, 0);
2276                 return ret;
2277         }
2278             /* Options with 'int' argument.  */
2279         case TARGET_SO_DEBUG:
2280                 optname = SO_DEBUG;
2281                 break;
2282         case TARGET_SO_REUSEADDR:
2283                 optname = SO_REUSEADDR;
2284                 break;
2285 #ifdef SO_REUSEPORT
2286         case TARGET_SO_REUSEPORT:
2287                 optname = SO_REUSEPORT;
2288                 break;
2289 #endif
2290         case TARGET_SO_TYPE:
2291                 optname = SO_TYPE;
2292                 break;
2293         case TARGET_SO_ERROR:
2294                 optname = SO_ERROR;
2295                 break;
2296         case TARGET_SO_DONTROUTE:
2297                 optname = SO_DONTROUTE;
2298                 break;
2299         case TARGET_SO_BROADCAST:
2300                 optname = SO_BROADCAST;
2301                 break;
2302         case TARGET_SO_SNDBUF:
2303                 optname = SO_SNDBUF;
2304                 break;
2305         case TARGET_SO_SNDBUFFORCE:
2306                 optname = SO_SNDBUFFORCE;
2307                 break;
2308         case TARGET_SO_RCVBUF:
2309                 optname = SO_RCVBUF;
2310                 break;
2311         case TARGET_SO_RCVBUFFORCE:
2312                 optname = SO_RCVBUFFORCE;
2313                 break;
2314         case TARGET_SO_KEEPALIVE:
2315                 optname = SO_KEEPALIVE;
2316                 break;
2317         case TARGET_SO_OOBINLINE:
2318                 optname = SO_OOBINLINE;
2319                 break;
2320         case TARGET_SO_NO_CHECK:
2321                 optname = SO_NO_CHECK;
2322                 break;
2323         case TARGET_SO_PRIORITY:
2324                 optname = SO_PRIORITY;
2325                 break;
2326 #ifdef SO_BSDCOMPAT
2327         case TARGET_SO_BSDCOMPAT:
2328                 optname = SO_BSDCOMPAT;
2329                 break;
2330 #endif
2331         case TARGET_SO_PASSCRED:
2332                 optname = SO_PASSCRED;
2333                 break;
2334         case TARGET_SO_PASSSEC:
2335                 optname = SO_PASSSEC;
2336                 break;
2337         case TARGET_SO_TIMESTAMP:
2338                 optname = SO_TIMESTAMP;
2339                 break;
2340         case TARGET_SO_RCVLOWAT:
2341                 optname = SO_RCVLOWAT;
2342                 break;
2343         default:
2344             goto unimplemented;
2345         }
2346         if (optlen < sizeof(uint32_t))
2347             return -TARGET_EINVAL;
2348 
2349         if (get_user_u32(val, optval_addr))
2350             return -TARGET_EFAULT;
2351         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2352         break;
2353 #ifdef SOL_NETLINK
2354     case SOL_NETLINK:
2355         switch (optname) {
2356         case NETLINK_PKTINFO:
2357         case NETLINK_ADD_MEMBERSHIP:
2358         case NETLINK_DROP_MEMBERSHIP:
2359         case NETLINK_BROADCAST_ERROR:
2360         case NETLINK_NO_ENOBUFS:
2361 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2362         case NETLINK_LISTEN_ALL_NSID:
2363         case NETLINK_CAP_ACK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2366         case NETLINK_EXT_ACK:
2367 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2368 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2369         case NETLINK_GET_STRICT_CHK:
2370 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2371             break;
2372         default:
2373             goto unimplemented;
2374         }
2375         val = 0;
2376         if (optlen < sizeof(uint32_t)) {
2377             return -TARGET_EINVAL;
2378         }
2379         if (get_user_u32(val, optval_addr)) {
2380             return -TARGET_EFAULT;
2381         }
2382         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2383                                    sizeof(val)));
2384         break;
2385 #endif /* SOL_NETLINK */
2386     default:
2387     unimplemented:
2388         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2389                       level, optname);
2390         ret = -TARGET_ENOPROTOOPT;
2391     }
2392     return ret;
2393 }
2394 
2395 /* do_getsockopt() must return target values and target errnos. */
2396 static abi_long do_getsockopt(int sockfd, int level, int optname,
2397                               abi_ulong optval_addr, abi_ulong optlen)
2398 {
2399     abi_long ret;
2400     int len, val;
2401     socklen_t lv;
2402 
2403     switch(level) {
2404     case TARGET_SOL_SOCKET:
2405         level = SOL_SOCKET;
2406         switch (optname) {
2407         /* These don't just return a single integer */
2408         case TARGET_SO_PEERNAME:
2409             goto unimplemented;
2410         case TARGET_SO_RCVTIMEO: {
2411             struct timeval tv;
2412             socklen_t tvlen;
2413 
2414             optname = SO_RCVTIMEO;
2415 
2416 get_timeout:
2417             if (get_user_u32(len, optlen)) {
2418                 return -TARGET_EFAULT;
2419             }
2420             if (len < 0) {
2421                 return -TARGET_EINVAL;
2422             }
2423 
2424             tvlen = sizeof(tv);
2425             ret = get_errno(getsockopt(sockfd, level, optname,
2426                                        &tv, &tvlen));
2427             if (ret < 0) {
2428                 return ret;
2429             }
2430             if (len > sizeof(struct target_timeval)) {
2431                 len = sizeof(struct target_timeval);
2432             }
2433             if (copy_to_user_timeval(optval_addr, &tv)) {
2434                 return -TARGET_EFAULT;
2435             }
2436             if (put_user_u32(len, optlen)) {
2437                 return -TARGET_EFAULT;
2438             }
2439             break;
2440         }
2441         case TARGET_SO_SNDTIMEO:
2442             optname = SO_SNDTIMEO;
2443             goto get_timeout;
2444         case TARGET_SO_PEERCRED: {
2445             struct ucred cr;
2446             socklen_t crlen;
2447             struct target_ucred *tcr;
2448 
2449             if (get_user_u32(len, optlen)) {
2450                 return -TARGET_EFAULT;
2451             }
2452             if (len < 0) {
2453                 return -TARGET_EINVAL;
2454             }
2455 
2456             crlen = sizeof(cr);
2457             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2458                                        &cr, &crlen));
2459             if (ret < 0) {
2460                 return ret;
2461             }
2462             if (len > crlen) {
2463                 len = crlen;
2464             }
2465             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2466                 return -TARGET_EFAULT;
2467             }
2468             __put_user(cr.pid, &tcr->pid);
2469             __put_user(cr.uid, &tcr->uid);
2470             __put_user(cr.gid, &tcr->gid);
2471             unlock_user_struct(tcr, optval_addr, 1);
2472             if (put_user_u32(len, optlen)) {
2473                 return -TARGET_EFAULT;
2474             }
2475             break;
2476         }
2477         case TARGET_SO_PEERSEC: {
2478             char *name;
2479 
2480             if (get_user_u32(len, optlen)) {
2481                 return -TARGET_EFAULT;
2482             }
2483             if (len < 0) {
2484                 return -TARGET_EINVAL;
2485             }
2486             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2487             if (!name) {
2488                 return -TARGET_EFAULT;
2489             }
2490             lv = len;
2491             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2492                                        name, &lv));
2493             if (put_user_u32(lv, optlen)) {
2494                 ret = -TARGET_EFAULT;
2495             }
2496             unlock_user(name, optval_addr, lv);
2497             break;
2498         }
2499         case TARGET_SO_LINGER:
2500         {
2501             struct linger lg;
2502             socklen_t lglen;
2503             struct target_linger *tlg;
2504 
2505             if (get_user_u32(len, optlen)) {
2506                 return -TARGET_EFAULT;
2507             }
2508             if (len < 0) {
2509                 return -TARGET_EINVAL;
2510             }
2511 
2512             lglen = sizeof(lg);
2513             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2514                                        &lg, &lglen));
2515             if (ret < 0) {
2516                 return ret;
2517             }
2518             if (len > lglen) {
2519                 len = lglen;
2520             }
2521             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2522                 return -TARGET_EFAULT;
2523             }
2524             __put_user(lg.l_onoff, &tlg->l_onoff);
2525             __put_user(lg.l_linger, &tlg->l_linger);
2526             unlock_user_struct(tlg, optval_addr, 1);
2527             if (put_user_u32(len, optlen)) {
2528                 return -TARGET_EFAULT;
2529             }
2530             break;
2531         }
2532         /* Options with 'int' argument.  */
2533         case TARGET_SO_DEBUG:
2534             optname = SO_DEBUG;
2535             goto int_case;
2536         case TARGET_SO_REUSEADDR:
2537             optname = SO_REUSEADDR;
2538             goto int_case;
2539 #ifdef SO_REUSEPORT
2540         case TARGET_SO_REUSEPORT:
2541             optname = SO_REUSEPORT;
2542             goto int_case;
2543 #endif
2544         case TARGET_SO_TYPE:
2545             optname = SO_TYPE;
2546             goto int_case;
2547         case TARGET_SO_ERROR:
2548             optname = SO_ERROR;
2549             goto int_case;
2550         case TARGET_SO_DONTROUTE:
2551             optname = SO_DONTROUTE;
2552             goto int_case;
2553         case TARGET_SO_BROADCAST:
2554             optname = SO_BROADCAST;
2555             goto int_case;
2556         case TARGET_SO_SNDBUF:
2557             optname = SO_SNDBUF;
2558             goto int_case;
2559         case TARGET_SO_RCVBUF:
2560             optname = SO_RCVBUF;
2561             goto int_case;
2562         case TARGET_SO_KEEPALIVE:
2563             optname = SO_KEEPALIVE;
2564             goto int_case;
2565         case TARGET_SO_OOBINLINE:
2566             optname = SO_OOBINLINE;
2567             goto int_case;
2568         case TARGET_SO_NO_CHECK:
2569             optname = SO_NO_CHECK;
2570             goto int_case;
2571         case TARGET_SO_PRIORITY:
2572             optname = SO_PRIORITY;
2573             goto int_case;
2574 #ifdef SO_BSDCOMPAT
2575         case TARGET_SO_BSDCOMPAT:
2576             optname = SO_BSDCOMPAT;
2577             goto int_case;
2578 #endif
2579         case TARGET_SO_PASSCRED:
2580             optname = SO_PASSCRED;
2581             goto int_case;
2582         case TARGET_SO_TIMESTAMP:
2583             optname = SO_TIMESTAMP;
2584             goto int_case;
2585         case TARGET_SO_RCVLOWAT:
2586             optname = SO_RCVLOWAT;
2587             goto int_case;
2588         case TARGET_SO_ACCEPTCONN:
2589             optname = SO_ACCEPTCONN;
2590             goto int_case;
2591         default:
2592             goto int_case;
2593         }
2594         break;
2595     case SOL_TCP:
2596         /* TCP options all take an 'int' value.  */
2597     int_case:
2598         if (get_user_u32(len, optlen))
2599             return -TARGET_EFAULT;
2600         if (len < 0)
2601             return -TARGET_EINVAL;
2602         lv = sizeof(lv);
2603         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2604         if (ret < 0)
2605             return ret;
2606         if (optname == SO_TYPE) {
2607             val = host_to_target_sock_type(val);
2608         }
2609         if (len > lv)
2610             len = lv;
2611         if (len == 4) {
2612             if (put_user_u32(val, optval_addr))
2613                 return -TARGET_EFAULT;
2614         } else {
2615             if (put_user_u8(val, optval_addr))
2616                 return -TARGET_EFAULT;
2617         }
2618         if (put_user_u32(len, optlen))
2619             return -TARGET_EFAULT;
2620         break;
2621     case SOL_IP:
2622         switch(optname) {
2623         case IP_TOS:
2624         case IP_TTL:
2625         case IP_HDRINCL:
2626         case IP_ROUTER_ALERT:
2627         case IP_RECVOPTS:
2628         case IP_RETOPTS:
2629         case IP_PKTINFO:
2630         case IP_MTU_DISCOVER:
2631         case IP_RECVERR:
2632         case IP_RECVTOS:
2633 #ifdef IP_FREEBIND
2634         case IP_FREEBIND:
2635 #endif
2636         case IP_MULTICAST_TTL:
2637         case IP_MULTICAST_LOOP:
2638             if (get_user_u32(len, optlen))
2639                 return -TARGET_EFAULT;
2640             if (len < 0)
2641                 return -TARGET_EINVAL;
2642             lv = sizeof(lv);
2643             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2644             if (ret < 0)
2645                 return ret;
2646             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2647                 len = 1;
2648                 if (put_user_u32(len, optlen)
2649                     || put_user_u8(val, optval_addr))
2650                     return -TARGET_EFAULT;
2651             } else {
2652                 if (len > sizeof(int))
2653                     len = sizeof(int);
2654                 if (put_user_u32(len, optlen)
2655                     || put_user_u32(val, optval_addr))
2656                     return -TARGET_EFAULT;
2657             }
2658             break;
2659         default:
2660             ret = -TARGET_ENOPROTOOPT;
2661             break;
2662         }
2663         break;
2664     case SOL_IPV6:
2665         switch (optname) {
2666         case IPV6_MTU_DISCOVER:
2667         case IPV6_MTU:
2668         case IPV6_V6ONLY:
2669         case IPV6_RECVPKTINFO:
2670         case IPV6_UNICAST_HOPS:
2671         case IPV6_MULTICAST_HOPS:
2672         case IPV6_MULTICAST_LOOP:
2673         case IPV6_RECVERR:
2674         case IPV6_RECVHOPLIMIT:
2675         case IPV6_2292HOPLIMIT:
2676         case IPV6_CHECKSUM:
2677         case IPV6_ADDRFORM:
2678         case IPV6_2292PKTINFO:
2679         case IPV6_RECVTCLASS:
2680         case IPV6_RECVRTHDR:
2681         case IPV6_2292RTHDR:
2682         case IPV6_RECVHOPOPTS:
2683         case IPV6_2292HOPOPTS:
2684         case IPV6_RECVDSTOPTS:
2685         case IPV6_2292DSTOPTS:
2686         case IPV6_TCLASS:
2687 #ifdef IPV6_RECVPATHMTU
2688         case IPV6_RECVPATHMTU:
2689 #endif
2690 #ifdef IPV6_TRANSPARENT
2691         case IPV6_TRANSPARENT:
2692 #endif
2693 #ifdef IPV6_FREEBIND
2694         case IPV6_FREEBIND:
2695 #endif
2696 #ifdef IPV6_RECVORIGDSTADDR
2697         case IPV6_RECVORIGDSTADDR:
2698 #endif
2699             if (get_user_u32(len, optlen))
2700                 return -TARGET_EFAULT;
2701             if (len < 0)
2702                 return -TARGET_EINVAL;
2703             lv = sizeof(lv);
2704             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2705             if (ret < 0)
2706                 return ret;
2707             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2708                 len = 1;
2709                 if (put_user_u32(len, optlen)
2710                     || put_user_u8(val, optval_addr))
2711                     return -TARGET_EFAULT;
2712             } else {
2713                 if (len > sizeof(int))
2714                     len = sizeof(int);
2715                 if (put_user_u32(len, optlen)
2716                     || put_user_u32(val, optval_addr))
2717                     return -TARGET_EFAULT;
2718             }
2719             break;
2720         default:
2721             ret = -TARGET_ENOPROTOOPT;
2722             break;
2723         }
2724         break;
2725 #ifdef SOL_NETLINK
2726     case SOL_NETLINK:
2727         switch (optname) {
2728         case NETLINK_PKTINFO:
2729         case NETLINK_BROADCAST_ERROR:
2730         case NETLINK_NO_ENOBUFS:
2731 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2732         case NETLINK_LISTEN_ALL_NSID:
2733         case NETLINK_CAP_ACK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2736         case NETLINK_EXT_ACK:
2737 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2738 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2739         case NETLINK_GET_STRICT_CHK:
2740 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2741             if (get_user_u32(len, optlen)) {
2742                 return -TARGET_EFAULT;
2743             }
2744             if (len != sizeof(val)) {
2745                 return -TARGET_EINVAL;
2746             }
2747             lv = len;
2748             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2749             if (ret < 0) {
2750                 return ret;
2751             }
2752             if (put_user_u32(lv, optlen)
2753                 || put_user_u32(val, optval_addr)) {
2754                 return -TARGET_EFAULT;
2755             }
2756             break;
2757 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2758         case NETLINK_LIST_MEMBERSHIPS:
2759         {
2760             uint32_t *results;
2761             int i;
2762             if (get_user_u32(len, optlen)) {
2763                 return -TARGET_EFAULT;
2764             }
2765             if (len < 0) {
2766                 return -TARGET_EINVAL;
2767             }
2768             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2769             if (!results) {
2770                 return -TARGET_EFAULT;
2771             }
2772             lv = len;
2773             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2774             if (ret < 0) {
2775                 unlock_user(results, optval_addr, 0);
2776                 return ret;
2777             }
2778             /* Swap host endianness to target endianness. */
2779             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2780                 results[i] = tswap32(results[i]);
2781             }
2782             if (put_user_u32(lv, optlen)) {
2783                 return -TARGET_EFAULT;
2784             }
2785             unlock_user(results, optval_addr, 0);
2786             break;
2787         }
2788 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2789         default:
2790             goto unimplemented;
2791         }
2792         break;
2793 #endif /* SOL_NETLINK */
2794     default:
2795     unimplemented:
2796         qemu_log_mask(LOG_UNIMP,
2797                       "getsockopt level=%d optname=%d not yet supported\n",
2798                       level, optname);
2799         ret = -TARGET_EOPNOTSUPP;
2800         break;
2801     }
2802     return ret;
2803 }
2804 
2805 /* Convert target low/high pair representing file offset into the host
2806  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2807  * as the kernel doesn't handle them either.
2808  */
2809 static void target_to_host_low_high(abi_ulong tlow,
2810                                     abi_ulong thigh,
2811                                     unsigned long *hlow,
2812                                     unsigned long *hhigh)
2813 {
2814     uint64_t off = tlow |
2815         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2816         TARGET_LONG_BITS / 2;
2817 
2818     *hlow = off;
2819     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2820 }
2821 
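/* Build a host iovec array from the guest iovec array at 'target_addr',
 * locking each buffer into host memory. On failure NULL is returned and
 * errno is set; a zero count returns NULL with errno cleared. A bad buffer
 * address after the first entry is recorded as a zero-length entry so that
 * a partial transfer can still take place.
 */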
2822 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2823                                 abi_ulong count, int copy)
2824 {
2825     struct target_iovec *target_vec;
2826     struct iovec *vec;
2827     abi_ulong total_len, max_len;
2828     int i;
2829     int err = 0;
2830     bool bad_address = false;
2831 
2832     if (count == 0) {
2833         errno = 0;
2834         return NULL;
2835     }
2836     if (count > IOV_MAX) {
2837         errno = EINVAL;
2838         return NULL;
2839     }
2840 
2841     vec = g_try_new0(struct iovec, count);
2842     if (vec == NULL) {
2843         errno = ENOMEM;
2844         return NULL;
2845     }
2846 
2847     target_vec = lock_user(VERIFY_READ, target_addr,
2848                            count * sizeof(struct target_iovec), 1);
2849     if (target_vec == NULL) {
2850         err = EFAULT;
2851         goto fail2;
2852     }
2853 
2854     /* ??? If host page size > target page size, this will result in a
2855        value larger than what we can actually support.  */
2856     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2857     total_len = 0;
2858 
2859     for (i = 0; i < count; i++) {
2860         abi_ulong base = tswapal(target_vec[i].iov_base);
2861         abi_long len = tswapal(target_vec[i].iov_len);
2862 
2863         if (len < 0) {
2864             err = EINVAL;
2865             goto fail;
2866         } else if (len == 0) {
2867             /* Zero length pointer is ignored.  */
2868             vec[i].iov_base = 0;
2869         } else {
2870             vec[i].iov_base = lock_user(type, base, len, copy);
2871             /* If the first buffer pointer is bad, this is a fault.  But
2872              * subsequent bad buffers will result in a partial write; this
2873              * is realized by filling the vector with null pointers and
2874              * zero lengths. */
2875             if (!vec[i].iov_base) {
2876                 if (i == 0) {
2877                     err = EFAULT;
2878                     goto fail;
2879                 } else {
2880                     bad_address = true;
2881                 }
2882             }
2883             if (bad_address) {
2884                 len = 0;
2885             }
2886             if (len > max_len - total_len) {
2887                 len = max_len - total_len;
2888             }
2889         }
2890         vec[i].iov_len = len;
2891         total_len += len;
2892     }
2893 
2894     unlock_user(target_vec, target_addr, 0);
2895     return vec;
2896 
2897  fail:
2898     while (--i >= 0) {
2899         if (tswapal(target_vec[i].iov_len) > 0) {
2900             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2901         }
2902     }
2903     unlock_user(target_vec, target_addr, 0);
2904  fail2:
2905     g_free(vec);
2906     errno = err;
2907     return NULL;
2908 }
2909 
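/* Undo lock_iovec(): unlock every guest buffer (copying data back to the
 * guest when 'copy' is set) and free the host vector.
 */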
2910 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2911                          abi_ulong count, int copy)
2912 {
2913     struct target_iovec *target_vec;
2914     int i;
2915 
2916     target_vec = lock_user(VERIFY_READ, target_addr,
2917                            count * sizeof(struct target_iovec), 1);
2918     if (target_vec) {
2919         for (i = 0; i < count; i++) {
2920             abi_ulong base = tswapal(target_vec[i].iov_base);
2921             abi_long len = tswapal(target_vec[i].iov_len);
2922             if (len < 0) {
2923                 break;
2924             }
2925             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2926         }
2927         unlock_user(target_vec, target_addr, 0);
2928     }
2929 
2930     g_free(vec);
2931 }
2932 
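/* Translate guest SOCK_* type and flag bits into their host values.
 * Returns -TARGET_EINVAL if the host cannot express a requested flag.
 */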
2933 static inline int target_to_host_sock_type(int *type)
2934 {
2935     int host_type = 0;
2936     int target_type = *type;
2937 
2938     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2939     case TARGET_SOCK_DGRAM:
2940         host_type = SOCK_DGRAM;
2941         break;
2942     case TARGET_SOCK_STREAM:
2943         host_type = SOCK_STREAM;
2944         break;
2945     default:
2946         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2947         break;
2948     }
2949     if (target_type & TARGET_SOCK_CLOEXEC) {
2950 #if defined(SOCK_CLOEXEC)
2951         host_type |= SOCK_CLOEXEC;
2952 #else
2953         return -TARGET_EINVAL;
2954 #endif
2955     }
2956     if (target_type & TARGET_SOCK_NONBLOCK) {
2957 #if defined(SOCK_NONBLOCK)
2958         host_type |= SOCK_NONBLOCK;
2959 #elif !defined(O_NONBLOCK)
2960         return -TARGET_EINVAL;
2961 #endif
2962     }
2963     *type = host_type;
2964     return 0;
2965 }
2966 
2967 /* Try to emulate socket type flags after socket creation.  */
2968 static int sock_flags_fixup(int fd, int target_type)
2969 {
2970 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2971     if (target_type & TARGET_SOCK_NONBLOCK) {
2972         int flags = fcntl(fd, F_GETFL);
2973         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2974             close(fd);
2975             return -TARGET_EINVAL;
2976         }
2977     }
2978 #endif
2979     return fd;
2980 }
2981 
2982 /* do_socket() must return target values and target errnos. */
2983 static abi_long do_socket(int domain, int type, int protocol)
2984 {
2985     int target_type = type;
2986     int ret;
2987 
2988     ret = target_to_host_sock_type(&type);
2989     if (ret) {
2990         return ret;
2991     }
2992 
2993     if (domain == PF_NETLINK && !(
2994 #ifdef CONFIG_RTNETLINK
2995          protocol == NETLINK_ROUTE ||
2996 #endif
2997          protocol == NETLINK_KOBJECT_UEVENT ||
2998          protocol == NETLINK_AUDIT)) {
2999         return -TARGET_EPROTONOSUPPORT;
3000     }
3001 
3002     if (domain == AF_PACKET ||
3003         (domain == AF_INET && type == SOCK_PACKET)) {
3004         protocol = tswap16(protocol);
3005     }
3006 
3007     ret = get_errno(socket(domain, type, protocol));
3008     if (ret >= 0) {
3009         ret = sock_flags_fixup(ret, target_type);
3010         if (type == SOCK_PACKET) {
3011             /* Handle an obsolete case: if the socket type is
3012              * SOCK_PACKET, the socket is bound by name.
3013              */
3014             fd_trans_register(ret, &target_packet_trans);
3015         } else if (domain == PF_NETLINK) {
3016             switch (protocol) {
3017 #ifdef CONFIG_RTNETLINK
3018             case NETLINK_ROUTE:
3019                 fd_trans_register(ret, &target_netlink_route_trans);
3020                 break;
3021 #endif
3022             case NETLINK_KOBJECT_UEVENT:
3023                 /* nothing to do: messages are strings */
3024                 break;
3025             case NETLINK_AUDIT:
3026                 fd_trans_register(ret, &target_netlink_audit_trans);
3027                 break;
3028             default:
3029                 g_assert_not_reached();
3030             }
3031         }
3032     }
3033     return ret;
3034 }
3035 
3036 /* do_bind() must return target values and target errnos. */
3037 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3038                         socklen_t addrlen)
3039 {
3040     void *addr;
3041     abi_long ret;
3042 
3043     if ((int)addrlen < 0) {
3044         return -TARGET_EINVAL;
3045     }
3046 
3047     addr = alloca(addrlen+1);
3048 
3049     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3050     if (ret)
3051         return ret;
3052 
3053     return get_errno(bind(sockfd, addr, addrlen));
3054 }
3055 
3056 /* do_connect() must return target values and target errnos. */
3057 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3058                            socklen_t addrlen)
3059 {
3060     void *addr;
3061     abi_long ret;
3062 
3063     if ((int)addrlen < 0) {
3064         return -TARGET_EINVAL;
3065     }
3066 
3067     addr = alloca(addrlen+1);
3068 
3069     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3070     if (ret)
3071         return ret;
3072 
3073     return get_errno(safe_connect(sockfd, addr, addrlen));
3074 }
3075 
3076 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3077 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3078                                       int flags, int send)
3079 {
3080     abi_long ret, len;
3081     struct msghdr msg;
3082     abi_ulong count;
3083     struct iovec *vec;
3084     abi_ulong target_vec;
3085 
3086     if (msgp->msg_name) {
3087         msg.msg_namelen = tswap32(msgp->msg_namelen);
3088         msg.msg_name = alloca(msg.msg_namelen+1);
3089         ret = target_to_host_sockaddr(fd, msg.msg_name,
3090                                       tswapal(msgp->msg_name),
3091                                       msg.msg_namelen);
3092         if (ret == -TARGET_EFAULT) {
3093             /* For connected sockets msg_name and msg_namelen must
3094              * be ignored, so returning EFAULT immediately is wrong.
3095              * Instead, pass a bad msg_name to the host kernel, and
3096              * let it decide whether to return EFAULT or not.
3097              */
3098             msg.msg_name = (void *)-1;
3099         } else if (ret) {
3100             goto out2;
3101         }
3102     } else {
3103         msg.msg_name = NULL;
3104         msg.msg_namelen = 0;
3105     }
3106     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3107     msg.msg_control = alloca(msg.msg_controllen);
3108     memset(msg.msg_control, 0, msg.msg_controllen);
3109 
3110     msg.msg_flags = tswap32(msgp->msg_flags);
3111 
3112     count = tswapal(msgp->msg_iovlen);
3113     target_vec = tswapal(msgp->msg_iov);
3114 
3115     if (count > IOV_MAX) {
3116         /* sendmsg/recvmsg return a different errno for this condition than
3117          * readv/writev, so we must catch it here before lock_iovec() does.
3118          */
3119         ret = -TARGET_EMSGSIZE;
3120         goto out2;
3121     }
3122 
3123     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3124                      target_vec, count, send);
3125     if (vec == NULL) {
3126         ret = -host_to_target_errno(errno);
3127         goto out2;
3128     }
3129     msg.msg_iovlen = count;
3130     msg.msg_iov = vec;
3131 
3132     if (send) {
3133         if (fd_trans_target_to_host_data(fd)) {
3134             void *host_msg;
3135 
3136             host_msg = g_malloc(msg.msg_iov->iov_len);
3137             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3138             ret = fd_trans_target_to_host_data(fd)(host_msg,
3139                                                    msg.msg_iov->iov_len);
3140             if (ret >= 0) {
3141                 msg.msg_iov->iov_base = host_msg;
3142                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3143             }
3144             g_free(host_msg);
3145         } else {
3146             ret = target_to_host_cmsg(&msg, msgp);
3147             if (ret == 0) {
3148                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3149             }
3150         }
3151     } else {
3152         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3153         if (!is_error(ret)) {
3154             len = ret;
3155             if (fd_trans_host_to_target_data(fd)) {
3156                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3157                                                MIN(msg.msg_iov->iov_len, len));
3158             } else {
3159                 ret = host_to_target_cmsg(msgp, &msg);
3160             }
3161             if (!is_error(ret)) {
3162                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3163                 msgp->msg_flags = tswap32(msg.msg_flags);
3164                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3165                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3166                                     msg.msg_name, msg.msg_namelen);
3167                     if (ret) {
3168                         goto out;
3169                     }
3170                 }
3171 
3172                 ret = len;
3173             }
3174         }
3175     }
3176 
3177 out:
3178     unlock_iovec(vec, target_vec, count, !send);
3179 out2:
3180     return ret;
3181 }
3182 
3183 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3184                                int flags, int send)
3185 {
3186     abi_long ret;
3187     struct target_msghdr *msgp;
3188 
3189     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3190                           msgp,
3191                           target_msg,
3192                           send ? 1 : 0)) {
3193         return -TARGET_EFAULT;
3194     }
3195     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3196     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3197     return ret;
3198 }
3199 
3200 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3201  * so it might not have this *mmsg-specific flag either.
3202  */
3203 #ifndef MSG_WAITFORONE
3204 #define MSG_WAITFORONE 0x10000
3205 #endif
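/*
 * Illustrative note (added, not part of the original source): with this
 * fallback in place, a guest call such as recvmmsg(fd, vec, 4, MSG_WAITFORONE,
 * NULL) can be emulated even on hosts whose libc headers predate the flag.
 * The loop in do_sendrecvmmsg() below ORs in MSG_DONTWAIT after the first
 * datagram, which is what gives MSG_WAITFORONE its "return as soon as one
 * message is available" behaviour.
 */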
3206 
3207 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3208                                 unsigned int vlen, unsigned int flags,
3209                                 int send)
3210 {
3211     struct target_mmsghdr *mmsgp;
3212     abi_long ret = 0;
3213     int i;
3214 
3215     if (vlen > UIO_MAXIOV) {
3216         vlen = UIO_MAXIOV;
3217     }
3218 
3219     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3220     if (!mmsgp) {
3221         return -TARGET_EFAULT;
3222     }
3223 
3224     for (i = 0; i < vlen; i++) {
3225         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3226         if (is_error(ret)) {
3227             break;
3228         }
3229         mmsgp[i].msg_len = tswap32(ret);
3230         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3231         if (flags & MSG_WAITFORONE) {
3232             flags |= MSG_DONTWAIT;
3233         }
3234     }
3235 
3236     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3237 
3238     /* Return the number of datagrams sent or received if we transferred
3239      * any at all; otherwise return the error.
3240      */
3241     if (i) {
3242         return i;
3243     }
3244     return ret;
3245 }
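/*
 * Worked example (added, illustrative only): if a guest calls sendmmsg() with
 * vlen == 3 and the second do_sendrecvmsg_locked() call fails with EAGAIN,
 * the loop above breaks with i == 1, so the guest sees a return value of 1
 * and must retry the remaining messages itself -- matching the "report the
 * error only if nothing was transferred" semantics described above.
 */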
3246 
3247 /* do_accept4() Must return target values and target errnos. */
3248 static abi_long do_accept4(int fd, abi_ulong target_addr,
3249                            abi_ulong target_addrlen_addr, int flags)
3250 {
3251     socklen_t addrlen, ret_addrlen;
3252     void *addr;
3253     abi_long ret;
3254     int host_flags;
3255 
3256     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3257 
3258     if (target_addr == 0) {
3259         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3260     }
3261 
3262     /* linux returns EINVAL if addrlen pointer is invalid */
3263     if (get_user_u32(addrlen, target_addrlen_addr))
3264         return -TARGET_EINVAL;
3265 
3266     if ((int)addrlen < 0) {
3267         return -TARGET_EINVAL;
3268     }
3269 
3270     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3271         return -TARGET_EINVAL;
3272 
3273     addr = alloca(addrlen);
3274 
3275     ret_addrlen = addrlen;
3276     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3277     if (!is_error(ret)) {
3278         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3279         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3280             ret = -TARGET_EFAULT;
3281         }
3282     }
3283     return ret;
3284 }
3285 
3286 /* do_getpeername() Must return target values and target errnos. */
3287 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3288                                abi_ulong target_addrlen_addr)
3289 {
3290     socklen_t addrlen, ret_addrlen;
3291     void *addr;
3292     abi_long ret;
3293 
3294     if (get_user_u32(addrlen, target_addrlen_addr))
3295         return -TARGET_EFAULT;
3296 
3297     if ((int)addrlen < 0) {
3298         return -TARGET_EINVAL;
3299     }
3300 
3301     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3302         return -TARGET_EFAULT;
3303 
3304     addr = alloca(addrlen);
3305 
3306     ret_addrlen = addrlen;
3307     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3308     if (!is_error(ret)) {
3309         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3310         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3311             ret = -TARGET_EFAULT;
3312         }
3313     }
3314     return ret;
3315 }
3316 
3317 /* do_getsockname() Must return target values and target errnos. */
3318 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3319                                abi_ulong target_addrlen_addr)
3320 {
3321     socklen_t addrlen, ret_addrlen;
3322     void *addr;
3323     abi_long ret;
3324 
3325     if (get_user_u32(addrlen, target_addrlen_addr))
3326         return -TARGET_EFAULT;
3327 
3328     if ((int)addrlen < 0) {
3329         return -TARGET_EINVAL;
3330     }
3331 
3332     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3333         return -TARGET_EFAULT;
3334 
3335     addr = alloca(addrlen);
3336 
3337     ret_addrlen = addrlen;
3338     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3339     if (!is_error(ret)) {
3340         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3341         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3342             ret = -TARGET_EFAULT;
3343         }
3344     }
3345     return ret;
3346 }
3347 
3348 /* do_socketpair() Must return target values and target errnos. */
3349 static abi_long do_socketpair(int domain, int type, int protocol,
3350                               abi_ulong target_tab_addr)
3351 {
3352     int tab[2];
3353     abi_long ret;
3354 
3355     target_to_host_sock_type(&type);
3356 
3357     ret = get_errno(socketpair(domain, type, protocol, tab));
3358     if (!is_error(ret)) {
3359         if (put_user_s32(tab[0], target_tab_addr)
3360             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3361             ret = -TARGET_EFAULT;
3362     }
3363     return ret;
3364 }
3365 
3366 /* do_sendto() Must return target values and target errnos. */
3367 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3368                           abi_ulong target_addr, socklen_t addrlen)
3369 {
3370     void *addr;
3371     void *host_msg;
3372     void *copy_msg = NULL;
3373     abi_long ret;
3374 
3375     if ((int)addrlen < 0) {
3376         return -TARGET_EINVAL;
3377     }
3378 
3379     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3380     if (!host_msg)
3381         return -TARGET_EFAULT;
3382     if (fd_trans_target_to_host_data(fd)) {
3383         copy_msg = host_msg;
3384         host_msg = g_malloc(len);
3385         memcpy(host_msg, copy_msg, len);
3386         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3387         if (ret < 0) {
3388             goto fail;
3389         }
3390     }
3391     if (target_addr) {
3392         addr = alloca(addrlen+1);
3393         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3394         if (ret) {
3395             goto fail;
3396         }
3397         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3398     } else {
3399         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3400     }
3401 fail:
3402     if (copy_msg) {
3403         g_free(host_msg);
3404         host_msg = copy_msg;
3405     }
3406     unlock_user(host_msg, msg, 0);
3407     return ret;
3408 }
3409 
3410 /* do_recvfrom() Must return target values and target errnos. */
3411 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3412                             abi_ulong target_addr,
3413                             abi_ulong target_addrlen)
3414 {
3415     socklen_t addrlen, ret_addrlen;
3416     void *addr;
3417     void *host_msg;
3418     abi_long ret;
3419 
3420     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3421     if (!host_msg)
3422         return -TARGET_EFAULT;
3423     if (target_addr) {
3424         if (get_user_u32(addrlen, target_addrlen)) {
3425             ret = -TARGET_EFAULT;
3426             goto fail;
3427         }
3428         if ((int)addrlen < 0) {
3429             ret = -TARGET_EINVAL;
3430             goto fail;
3431         }
3432         addr = alloca(addrlen);
3433         ret_addrlen = addrlen;
3434         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3435                                       addr, &ret_addrlen));
3436     } else {
3437         addr = NULL; /* To keep compiler quiet.  */
3438         addrlen = 0; /* To keep compiler quiet.  */
3439         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3440     }
3441     if (!is_error(ret)) {
3442         if (fd_trans_host_to_target_data(fd)) {
3443             abi_long trans;
3444             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3445             if (is_error(trans)) {
3446                 ret = trans;
3447                 goto fail;
3448             }
3449         }
3450         if (target_addr) {
3451             host_to_target_sockaddr(target_addr, addr,
3452                                     MIN(addrlen, ret_addrlen));
3453             if (put_user_u32(ret_addrlen, target_addrlen)) {
3454                 ret = -TARGET_EFAULT;
3455                 goto fail;
3456             }
3457         }
3458         unlock_user(host_msg, msg, len);
3459     } else {
3460 fail:
3461         unlock_user(host_msg, msg, 0);
3462     }
3463     return ret;
3464 }
3465 
3466 #ifdef TARGET_NR_socketcall
3467 /* do_socketcall() must return target values and target errnos. */
3468 static abi_long do_socketcall(int num, abi_ulong vptr)
3469 {
3470     static const unsigned nargs[] = { /* number of arguments per operation */
3471         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3472         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3473         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3474         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3475         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3476         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3477         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3478         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3479         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3480         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3481         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3482         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3483         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3484         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3485         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3486         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3487         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3488         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3489         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3490         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3491     };
3492     abi_long a[6]; /* max 6 args */
3493     unsigned i;
3494 
3495     /* check the range of the first argument num */
3496     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3497     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3498         return -TARGET_EINVAL;
3499     }
3500     /* ensure we have space for args */
3501     if (nargs[num] > ARRAY_SIZE(a)) {
3502         return -TARGET_EINVAL;
3503     }
3504     /* collect the arguments in a[] according to nargs[] */
3505     for (i = 0; i < nargs[num]; ++i) {
3506         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3507             return -TARGET_EFAULT;
3508         }
3509     }
3510     /* now that we have the args, invoke the appropriate underlying function */
3511     switch (num) {
3512     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3513         return do_socket(a[0], a[1], a[2]);
3514     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3515         return do_bind(a[0], a[1], a[2]);
3516     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3517         return do_connect(a[0], a[1], a[2]);
3518     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3519         return get_errno(listen(a[0], a[1]));
3520     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3521         return do_accept4(a[0], a[1], a[2], 0);
3522     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3523         return do_getsockname(a[0], a[1], a[2]);
3524     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3525         return do_getpeername(a[0], a[1], a[2]);
3526     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3527         return do_socketpair(a[0], a[1], a[2], a[3]);
3528     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3529         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3530     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3531         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3532     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3533         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3534     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3535         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3536     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3537         return get_errno(shutdown(a[0], a[1]));
3538     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3539         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3540     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3541         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3542     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3543         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3544     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3545         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3546     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3547         return do_accept4(a[0], a[1], a[2], a[3]);
3548     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3549         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3550     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3551         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3552     default:
3553         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3554         return -TARGET_EINVAL;
3555     }
3556 }
3557 #endif
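/*
 * Illustrative sketch (added; describes a typical guest, not new behaviour):
 * on a target that multiplexes sockets through socketcall, a guest connect()
 * arrives here as socketcall(TARGET_SYS_CONNECT, vptr) where vptr points to
 * three abi_long slots { sockfd, addr, addrlen }.  The loop above fetches
 * them with get_user_ual() into a[0..2] and the switch then dispatches to
 * do_connect(a[0], a[1], a[2]).
 */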
3558 
3559 #define N_SHM_REGIONS	32
3560 
3561 static struct shm_region {
3562     abi_ulong start;
3563     abi_ulong size;
3564     bool in_use;
3565 } shm_regions[N_SHM_REGIONS];
3566 
3567 #ifndef TARGET_SEMID64_DS
3568 /* asm-generic version of this struct */
3569 struct target_semid64_ds
3570 {
3571   struct target_ipc_perm sem_perm;
3572   abi_ulong sem_otime;
3573 #if TARGET_ABI_BITS == 32
3574   abi_ulong __unused1;
3575 #endif
3576   abi_ulong sem_ctime;
3577 #if TARGET_ABI_BITS == 32
3578   abi_ulong __unused2;
3579 #endif
3580   abi_ulong sem_nsems;
3581   abi_ulong __unused3;
3582   abi_ulong __unused4;
3583 };
3584 #endif
3585 
3586 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3587                                                abi_ulong target_addr)
3588 {
3589     struct target_ipc_perm *target_ip;
3590     struct target_semid64_ds *target_sd;
3591 
3592     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3593         return -TARGET_EFAULT;
3594     target_ip = &(target_sd->sem_perm);
3595     host_ip->__key = tswap32(target_ip->__key);
3596     host_ip->uid = tswap32(target_ip->uid);
3597     host_ip->gid = tswap32(target_ip->gid);
3598     host_ip->cuid = tswap32(target_ip->cuid);
3599     host_ip->cgid = tswap32(target_ip->cgid);
3600 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3601     host_ip->mode = tswap32(target_ip->mode);
3602 #else
3603     host_ip->mode = tswap16(target_ip->mode);
3604 #endif
3605 #if defined(TARGET_PPC)
3606     host_ip->__seq = tswap32(target_ip->__seq);
3607 #else
3608     host_ip->__seq = tswap16(target_ip->__seq);
3609 #endif
3610     unlock_user_struct(target_sd, target_addr, 0);
3611     return 0;
3612 }
3613 
3614 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3615                                                struct ipc_perm *host_ip)
3616 {
3617     struct target_ipc_perm *target_ip;
3618     struct target_semid64_ds *target_sd;
3619 
3620     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3621         return -TARGET_EFAULT;
3622     target_ip = &(target_sd->sem_perm);
3623     target_ip->__key = tswap32(host_ip->__key);
3624     target_ip->uid = tswap32(host_ip->uid);
3625     target_ip->gid = tswap32(host_ip->gid);
3626     target_ip->cuid = tswap32(host_ip->cuid);
3627     target_ip->cgid = tswap32(host_ip->cgid);
3628 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3629     target_ip->mode = tswap32(host_ip->mode);
3630 #else
3631     target_ip->mode = tswap16(host_ip->mode);
3632 #endif
3633 #if defined(TARGET_PPC)
3634     target_ip->__seq = tswap32(host_ip->__seq);
3635 #else
3636     target_ip->__seq = tswap16(host_ip->__seq);
3637 #endif
3638     unlock_user_struct(target_sd, target_addr, 1);
3639     return 0;
3640 }
3641 
3642 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3643                                                abi_ulong target_addr)
3644 {
3645     struct target_semid64_ds *target_sd;
3646 
3647     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3648         return -TARGET_EFAULT;
3649     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3650         return -TARGET_EFAULT;
3651     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3652     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3653     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3654     unlock_user_struct(target_sd, target_addr, 0);
3655     return 0;
3656 }
3657 
3658 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3659                                                struct semid_ds *host_sd)
3660 {
3661     struct target_semid64_ds *target_sd;
3662 
3663     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3664         return -TARGET_EFAULT;
3665     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3666         return -TARGET_EFAULT;
3667     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3668     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3669     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3670     unlock_user_struct(target_sd, target_addr, 1);
3671     return 0;
3672 }
3673 
3674 struct target_seminfo {
3675     int semmap;
3676     int semmni;
3677     int semmns;
3678     int semmnu;
3679     int semmsl;
3680     int semopm;
3681     int semume;
3682     int semusz;
3683     int semvmx;
3684     int semaem;
3685 };
3686 
3687 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3688                                               struct seminfo *host_seminfo)
3689 {
3690     struct target_seminfo *target_seminfo;
3691     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3692         return -TARGET_EFAULT;
3693     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3694     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3695     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3696     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3697     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3698     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3699     __put_user(host_seminfo->semume, &target_seminfo->semume);
3700     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3701     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3702     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3703     unlock_user_struct(target_seminfo, target_addr, 1);
3704     return 0;
3705 }
3706 
3707 union semun {
3708     int val;
3709     struct semid_ds *buf;
3710     unsigned short *array;
3711     struct seminfo *__buf;
3712 };
3713 
3714 union target_semun {
3715     int val;
3716     abi_ulong buf;
3717     abi_ulong array;
3718     abi_ulong __buf;
3719 };
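/*
 * Descriptive note (added, not in the original source): unlike the host-side
 * union semun above, the pointer members of target_semun are abi_ulong guest
 * addresses, not host pointers.  do_semctl() therefore cannot hand them to
 * semctl() directly; target_su.array is translated via
 * target_to_host_semarray() and target_su.buf via target_to_host_semid_ds()
 * before the host call is made.
 */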
3720 
3721 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3722                                                abi_ulong target_addr)
3723 {
3724     int nsems;
3725     unsigned short *array;
3726     union semun semun;
3727     struct semid_ds semid_ds;
3728     int i, ret;
3729 
3730     semun.buf = &semid_ds;
3731 
3732     ret = semctl(semid, 0, IPC_STAT, semun);
3733     if (ret == -1)
3734         return get_errno(ret);
3735 
3736     nsems = semid_ds.sem_nsems;
3737 
3738     *host_array = g_try_new(unsigned short, nsems);
3739     if (!*host_array) {
3740         return -TARGET_ENOMEM;
3741     }
3742     array = lock_user(VERIFY_READ, target_addr,
3743                       nsems*sizeof(unsigned short), 1);
3744     if (!array) {
3745         g_free(*host_array);
3746         return -TARGET_EFAULT;
3747     }
3748 
3749     for (i = 0; i < nsems; i++) {
3750         __get_user((*host_array)[i], &array[i]);
3751     }
3752     unlock_user(array, target_addr, 0);
3753 
3754     return 0;
3755 }
3756 
3757 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3758                                                unsigned short **host_array)
3759 {
3760     int nsems;
3761     unsigned short *array;
3762     union semun semun;
3763     struct semid_ds semid_ds;
3764     int i, ret;
3765 
3766     semun.buf = &semid_ds;
3767 
3768     ret = semctl(semid, 0, IPC_STAT, semun);
3769     if (ret == -1)
3770         return get_errno(ret);
3771 
3772     nsems = semid_ds.sem_nsems;
3773 
3774     array = lock_user(VERIFY_WRITE, target_addr,
3775                       nsems*sizeof(unsigned short), 0);
3776     if (!array)
3777         return -TARGET_EFAULT;
3778 
3779     for (i = 0; i < nsems; i++) {
3780         __put_user((*host_array)[i], &array[i]);
3781     }
3782     g_free(*host_array);
3783     unlock_user(array, target_addr, 1);
3784 
3785     return 0;
3786 }
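/*
 * Descriptive note (added): target_to_host_semarray() and
 * host_to_target_semarray() bracket the GETALL/SETALL semctl in do_semctl()
 * below -- the former allocates the host array with g_try_new() and copies
 * the guest values in, the latter copies the results back and is also
 * responsible for freeing that array.
 */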
3787 
3788 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3789                                  abi_ulong target_arg)
3790 {
3791     union target_semun target_su = { .buf = target_arg };
3792     union semun arg;
3793     struct semid_ds dsarg;
3794     unsigned short *array = NULL;
3795     struct seminfo seminfo;
3796     abi_long ret = -TARGET_EINVAL;
3797     abi_long err;
3798     cmd &= 0xff;
3799 
3800     switch (cmd) {
3801         case GETVAL:
3802         case SETVAL:
3803             /* In 64 bit cross-endian situations, we will erroneously pick up
3804              * the wrong half of the union for the "val" element.  To rectify
3805              * this, the entire 8-byte structure is byteswapped, followed by
3806              * a swap of the 4 byte val field. In other cases, the data is
3807              * already in proper host byte order. */
3808             if (sizeof(target_su.val) != sizeof(target_su.buf)) {
3809                 target_su.buf = tswapal(target_su.buf);
3810                 arg.val = tswap32(target_su.val);
3811             } else {
3812                 arg.val = target_su.val;
3813             }
3814             ret = get_errno(semctl(semid, semnum, cmd, arg));
3815             break;
3816         case GETALL:
3817         case SETALL:
3818             err = target_to_host_semarray(semid, &array, target_su.array);
3819             if (err)
3820                 return err;
3821             arg.array = array;
3822             ret = get_errno(semctl(semid, semnum, cmd, arg));
3823             err = host_to_target_semarray(semid, target_su.array, &array);
3824             if (err)
3825                 return err;
3826             break;
3827         case IPC_STAT:
3828         case IPC_SET:
3829         case SEM_STAT:
3830             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3831             if (err)
3832                 return err;
3833             arg.buf = &dsarg;
3834             ret = get_errno(semctl(semid, semnum, cmd, arg));
3835             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3836             if (err)
3837                 return err;
3838             break;
3839         case IPC_INFO:
3840         case SEM_INFO:
3841             arg.__buf = &seminfo;
3842             ret = get_errno(semctl(semid, semnum, cmd, arg));
3843             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3844             if (err)
3845                 return err;
3846             break;
3847         case IPC_RMID:
3848         case GETPID:
3849         case GETNCNT:
3850         case GETZCNT:
3851             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3852             break;
3853     }
3854 
3855     return ret;
3856 }
3857 
3858 struct target_sembuf {
3859     unsigned short sem_num;
3860     short sem_op;
3861     short sem_flg;
3862 };
3863 
3864 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3865                                              abi_ulong target_addr,
3866                                              unsigned nsops)
3867 {
3868     struct target_sembuf *target_sembuf;
3869     int i;
3870 
3871     target_sembuf = lock_user(VERIFY_READ, target_addr,
3872                               nsops*sizeof(struct target_sembuf), 1);
3873     if (!target_sembuf)
3874         return -TARGET_EFAULT;
3875 
3876     for (i = 0; i < nsops; i++) {
3877         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3878         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3879         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3880     }
3881 
3882     unlock_user(target_sembuf, target_addr, 0);
3883 
3884     return 0;
3885 }
3886 
3887 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3888     defined(TARGET_NR_semtimedop)
3889 
3890 /*
3891  * This macro is required to handle the s390 variant, which passes the
3892  * arguments in a different order from the default.
3893  */
3894 #ifdef __s390x__
3895 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3896   (__nsops), (__timeout), (__sops)
3897 #else
3898 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3899   (__nsops), 0, (__sops), (__timeout)
3900 #endif
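/*
 * Illustrative expansion (added for clarity, not part of the original code):
 * on a generic host,
 *     safe_ipc(IPCOP_semtimedop, semid,
 *              SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))
 * becomes
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts);
 * whereas on s390x it becomes
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops);
 * i.e. the timeout travels in the slot that the generic variant leaves as 0,
 * and there is no sixth parameter.
 */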
3901 
3902 static inline abi_long do_semtimedop(int semid,
3903                                      abi_long ptr,
3904                                      unsigned nsops,
3905                                      abi_long timeout)
3906 {
3907     struct sembuf sops[nsops];
3908     struct timespec ts, *pts = NULL;
3909     abi_long ret;
3910 
3911     if (timeout) {
3912         pts = &ts;
3913         if (target_to_host_timespec(pts, timeout)) {
3914             return -TARGET_EFAULT;
3915         }
3916     }
3917 
3918     if (target_to_host_sembuf(sops, ptr, nsops))
3919         return -TARGET_EFAULT;
3920 
3921     ret = -TARGET_ENOSYS;
3922 #ifdef __NR_semtimedop
3923     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3924 #endif
3925 #ifdef __NR_ipc
3926     if (ret == -TARGET_ENOSYS) {
3927         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3928                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3929     }
3930 #endif
3931     return ret;
3932 }
3933 #endif
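/*
 * Usage note (added): do_semtimedop() also backs plain semop emulation --
 * do_ipc() below calls it with timeout == 0, in which case pts stays NULL and
 * the host waits without a timeout, just as semop() would.
 */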
3934 
3935 struct target_msqid_ds
3936 {
3937     struct target_ipc_perm msg_perm;
3938     abi_ulong msg_stime;
3939 #if TARGET_ABI_BITS == 32
3940     abi_ulong __unused1;
3941 #endif
3942     abi_ulong msg_rtime;
3943 #if TARGET_ABI_BITS == 32
3944     abi_ulong __unused2;
3945 #endif
3946     abi_ulong msg_ctime;
3947 #if TARGET_ABI_BITS == 32
3948     abi_ulong __unused3;
3949 #endif
3950     abi_ulong __msg_cbytes;
3951     abi_ulong msg_qnum;
3952     abi_ulong msg_qbytes;
3953     abi_ulong msg_lspid;
3954     abi_ulong msg_lrpid;
3955     abi_ulong __unused4;
3956     abi_ulong __unused5;
3957 };
3958 
3959 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3960                                                abi_ulong target_addr)
3961 {
3962     struct target_msqid_ds *target_md;
3963 
3964     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3965         return -TARGET_EFAULT;
3966     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3967         return -TARGET_EFAULT;
3968     host_md->msg_stime = tswapal(target_md->msg_stime);
3969     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3970     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3971     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3972     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3973     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3974     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3975     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3976     unlock_user_struct(target_md, target_addr, 0);
3977     return 0;
3978 }
3979 
3980 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3981                                                struct msqid_ds *host_md)
3982 {
3983     struct target_msqid_ds *target_md;
3984 
3985     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3986         return -TARGET_EFAULT;
3987     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3988         return -TARGET_EFAULT;
3989     target_md->msg_stime = tswapal(host_md->msg_stime);
3990     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3991     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3992     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3993     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3994     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3995     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3996     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3997     unlock_user_struct(target_md, target_addr, 1);
3998     return 0;
3999 }
4000 
4001 struct target_msginfo {
4002     int msgpool;
4003     int msgmap;
4004     int msgmax;
4005     int msgmnb;
4006     int msgmni;
4007     int msgssz;
4008     int msgtql;
4009     unsigned short int msgseg;
4010 };
4011 
4012 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4013                                               struct msginfo *host_msginfo)
4014 {
4015     struct target_msginfo *target_msginfo;
4016     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4017         return -TARGET_EFAULT;
4018     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4019     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4020     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4021     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4022     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4023     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4024     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4025     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4026     unlock_user_struct(target_msginfo, target_addr, 1);
4027     return 0;
4028 }
4029 
4030 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4031 {
4032     struct msqid_ds dsarg;
4033     struct msginfo msginfo;
4034     abi_long ret = -TARGET_EINVAL;
4035 
4036     cmd &= 0xff;
4037 
4038     switch (cmd) {
4039     case IPC_STAT:
4040     case IPC_SET:
4041     case MSG_STAT:
4042         if (target_to_host_msqid_ds(&dsarg,ptr))
4043             return -TARGET_EFAULT;
4044         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4045         if (host_to_target_msqid_ds(ptr,&dsarg))
4046             return -TARGET_EFAULT;
4047         break;
4048     case IPC_RMID:
4049         ret = get_errno(msgctl(msgid, cmd, NULL));
4050         break;
4051     case IPC_INFO:
4052     case MSG_INFO:
4053         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4054         if (host_to_target_msginfo(ptr, &msginfo))
4055             return -TARGET_EFAULT;
4056         break;
4057     }
4058 
4059     return ret;
4060 }
4061 
4062 struct target_msgbuf {
4063     abi_long mtype;
4064     char mtext[1];
4065 };
4066 
4067 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4068                                  ssize_t msgsz, int msgflg)
4069 {
4070     struct target_msgbuf *target_mb;
4071     struct msgbuf *host_mb;
4072     abi_long ret = 0;
4073 
4074     if (msgsz < 0) {
4075         return -TARGET_EINVAL;
4076     }
4077 
4078     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4079         return -TARGET_EFAULT;
4080     host_mb = g_try_malloc(msgsz + sizeof(long));
4081     if (!host_mb) {
4082         unlock_user_struct(target_mb, msgp, 0);
4083         return -TARGET_ENOMEM;
4084     }
4085     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4086     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4087     ret = -TARGET_ENOSYS;
4088 #ifdef __NR_msgsnd
4089     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4090 #endif
4091 #ifdef __NR_ipc
4092     if (ret == -TARGET_ENOSYS) {
4093 #ifdef __s390x__
4094         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4095                                  host_mb));
4096 #else
4097         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4098                                  host_mb, 0));
4099 #endif
4100     }
4101 #endif
4102     g_free(host_mb);
4103     unlock_user_struct(target_mb, msgp, 0);
4104 
4105     return ret;
4106 }
4107 
4108 #ifdef __NR_ipc
4109 #if defined(__sparc__)
4110 /* SPARC's msgrcv does not use the kludge on the final 2 arguments. */
4111 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4112 #elif defined(__s390x__)
4113 /* The s390 sys_ipc variant has only five parameters.  */
4114 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4115     ((long int[]){(long int)__msgp, __msgtyp})
4116 #else
4117 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4118     ((long int[]){(long int)__msgp, __msgtyp}), 0
4119 #endif
4120 #endif
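/*
 * Illustrative expansion (added for clarity): with the generic definition,
 * the safe_ipc() call in do_msgrcv() passes a temporary two-element array
 * {(long)host_mb, msgtyp} plus a trailing 0; the SPARC variant passes
 * host_mb and msgtyp as two plain arguments, and the s390x variant passes
 * the same temporary array but drops the trailing argument, matching its
 * five-parameter sys_ipc.
 */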
4121 
4122 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4123                                  ssize_t msgsz, abi_long msgtyp,
4124                                  int msgflg)
4125 {
4126     struct target_msgbuf *target_mb;
4127     char *target_mtext;
4128     struct msgbuf *host_mb;
4129     abi_long ret = 0;
4130 
4131     if (msgsz < 0) {
4132         return -TARGET_EINVAL;
4133     }
4134 
4135     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4136         return -TARGET_EFAULT;
4137 
4138     host_mb = g_try_malloc(msgsz + sizeof(long));
4139     if (!host_mb) {
4140         ret = -TARGET_ENOMEM;
4141         goto end;
4142     }
4143     ret = -TARGET_ENOSYS;
4144 #ifdef __NR_msgrcv
4145     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4146 #endif
4147 #ifdef __NR_ipc
4148     if (ret == -TARGET_ENOSYS) {
4149         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4150                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4151     }
4152 #endif
4153 
4154     if (ret > 0) {
4155         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4156         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4157         if (!target_mtext) {
4158             ret = -TARGET_EFAULT;
4159             goto end;
4160         }
4161         memcpy(target_mb->mtext, host_mb->mtext, ret);
4162         unlock_user(target_mtext, target_mtext_addr, ret);
4163     }
4164 
4165     target_mb->mtype = tswapal(host_mb->mtype);
4166 
4167 end:
4168     if (target_mb)
4169         unlock_user_struct(target_mb, msgp, 1);
4170     g_free(host_mb);
4171     return ret;
4172 }
4173 
4174 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4175                                                abi_ulong target_addr)
4176 {
4177     struct target_shmid_ds *target_sd;
4178 
4179     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4180         return -TARGET_EFAULT;
4181     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4182         return -TARGET_EFAULT;
4183     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4184     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4185     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4186     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4187     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4188     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4189     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4190     unlock_user_struct(target_sd, target_addr, 0);
4191     return 0;
4192 }
4193 
4194 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4195                                                struct shmid_ds *host_sd)
4196 {
4197     struct target_shmid_ds *target_sd;
4198 
4199     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4200         return -TARGET_EFAULT;
4201     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4202         return -TARGET_EFAULT;
4203     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4204     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4205     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4206     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4207     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4208     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4209     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4210     unlock_user_struct(target_sd, target_addr, 1);
4211     return 0;
4212 }
4213 
4214 struct  target_shminfo {
4215     abi_ulong shmmax;
4216     abi_ulong shmmin;
4217     abi_ulong shmmni;
4218     abi_ulong shmseg;
4219     abi_ulong shmall;
4220 };
4221 
4222 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4223                                               struct shminfo *host_shminfo)
4224 {
4225     struct target_shminfo *target_shminfo;
4226     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4227         return -TARGET_EFAULT;
4228     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4229     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4230     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4231     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4232     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4233     unlock_user_struct(target_shminfo, target_addr, 1);
4234     return 0;
4235 }
4236 
4237 struct target_shm_info {
4238     int used_ids;
4239     abi_ulong shm_tot;
4240     abi_ulong shm_rss;
4241     abi_ulong shm_swp;
4242     abi_ulong swap_attempts;
4243     abi_ulong swap_successes;
4244 };
4245 
4246 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4247                                                struct shm_info *host_shm_info)
4248 {
4249     struct target_shm_info *target_shm_info;
4250     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4251         return -TARGET_EFAULT;
4252     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4253     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4254     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4255     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4256     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4257     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4258     unlock_user_struct(target_shm_info, target_addr, 1);
4259     return 0;
4260 }
4261 
4262 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4263 {
4264     struct shmid_ds dsarg;
4265     struct shminfo shminfo;
4266     struct shm_info shm_info;
4267     abi_long ret = -TARGET_EINVAL;
4268 
4269     cmd &= 0xff;
4270 
4271     switch (cmd) {
4272     case IPC_STAT:
4273     case IPC_SET:
4274     case SHM_STAT:
4275         if (target_to_host_shmid_ds(&dsarg, buf))
4276             return -TARGET_EFAULT;
4277         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4278         if (host_to_target_shmid_ds(buf, &dsarg))
4279             return -TARGET_EFAULT;
4280         break;
4281     case IPC_INFO:
4282         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4283         if (host_to_target_shminfo(buf, &shminfo))
4284             return -TARGET_EFAULT;
4285         break;
4286     case SHM_INFO:
4287         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4288         if (host_to_target_shm_info(buf, &shm_info))
4289             return -TARGET_EFAULT;
4290         break;
4291     case IPC_RMID:
4292     case SHM_LOCK:
4293     case SHM_UNLOCK:
4294         ret = get_errno(shmctl(shmid, cmd, NULL));
4295         break;
4296     }
4297 
4298     return ret;
4299 }
4300 
4301 #ifndef TARGET_FORCE_SHMLBA
4302 /* For most architectures, SHMLBA is the same as the page size;
4303  * some architectures have larger values, in which case they should
4304  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4305  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4306  * and defining its own value for SHMLBA.
4307  *
4308  * The kernel also permits SHMLBA to be set by the architecture to a
4309  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4310  * this means that addresses are rounded to the large size if
4311  * SHM_RND is set but addresses not aligned to that size are not rejected
4312  * as long as they are at least page-aligned. Since the only architecture
4313  * which uses this is ia64, this code doesn't provide for that oddity.
4314  */
4315 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4316 {
4317     return TARGET_PAGE_SIZE;
4318 }
4319 #endif
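/*
 * Hypothetical sketch (added; the values below are examples, not taken from
 * any real target): an architecture whose cache aliasing requires a larger
 * alignment would define TARGET_FORCE_SHMLBA in its target headers and
 * provide its own hook, along the lines of:
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;   (example value only)
 *     }
 *
 * do_shmat() below only ever calls target_shmlba(), so such an override is
 * the single point where a target changes the alignment rules.
 */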
4320 
4321 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4322                                  int shmid, abi_ulong shmaddr, int shmflg)
4323 {
4324     abi_long raddr;
4325     void *host_raddr;
4326     struct shmid_ds shm_info;
4327     int i, ret;
4328     abi_ulong shmlba;
4329 
4330     /* find out the length of the shared memory segment */
4331     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4332     if (is_error(ret)) {
4333         /* can't get length, bail out */
4334         return ret;
4335     }
4336 
4337     shmlba = target_shmlba(cpu_env);
4338 
4339     if (shmaddr & (shmlba - 1)) {
4340         if (shmflg & SHM_RND) {
4341             shmaddr &= ~(shmlba - 1);
4342         } else {
4343             return -TARGET_EINVAL;
4344         }
4345     }
4346     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4347         return -TARGET_EINVAL;
4348     }
4349 
4350     mmap_lock();
4351 
4352     if (shmaddr)
4353         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4354     else {
4355         abi_ulong mmap_start;
4356 
4357         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4358         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4359 
4360         if (mmap_start == -1) {
4361             errno = ENOMEM;
4362             host_raddr = (void *)-1;
4363         } else
4364             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4365     }
4366 
4367     if (host_raddr == (void *)-1) {
4368         mmap_unlock();
4369         return get_errno((long)host_raddr);
4370     }
4371     raddr = h2g((unsigned long)host_raddr);
4372 
4373     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4374                    PAGE_VALID | PAGE_READ |
4375                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4376 
4377     for (i = 0; i < N_SHM_REGIONS; i++) {
4378         if (!shm_regions[i].in_use) {
4379             shm_regions[i].in_use = true;
4380             shm_regions[i].start = raddr;
4381             shm_regions[i].size = shm_info.shm_segsz;
4382             break;
4383         }
4384     }
4385 
4386     mmap_unlock();
4387     return raddr;
4388 
4389 }
4390 
4391 static inline abi_long do_shmdt(abi_ulong shmaddr)
4392 {
4393     int i;
4394     abi_long rv;
4395 
4396     mmap_lock();
4397 
4398     for (i = 0; i < N_SHM_REGIONS; ++i) {
4399         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4400             shm_regions[i].in_use = false;
4401             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4402             break;
4403         }
4404     }
4405     rv = get_errno(shmdt(g2h(shmaddr)));
4406 
4407     mmap_unlock();
4408 
4409     return rv;
4410 }
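/*
 * Descriptive note (added): do_shmat() and do_shmdt() keep the small
 * shm_regions[] table above in sync so that detach knows how many guest
 * pages to invalidate -- the host shmdt() only takes an address, so the
 * segment size recorded at attach time is the only way to undo the
 * page_set_flags() bookkeeping.
 */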
4411 
4412 #ifdef TARGET_NR_ipc
4413 /* ??? This only works with linear mappings.  */
4414 /* do_ipc() must return target values and target errnos. */
4415 static abi_long do_ipc(CPUArchState *cpu_env,
4416                        unsigned int call, abi_long first,
4417                        abi_long second, abi_long third,
4418                        abi_long ptr, abi_long fifth)
4419 {
4420     int version;
4421     abi_long ret = 0;
4422 
4423     version = call >> 16;
4424     call &= 0xffff;
4425 
4426     switch (call) {
4427     case IPCOP_semop:
4428         ret = do_semtimedop(first, ptr, second, 0);
4429         break;
4430     case IPCOP_semtimedop:
4431     /*
4432      * The s390 sys_ipc variant has only five parameters instead of six
4433      * (as for default variant) and the only difference is the handling of
4434      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4435      * to a struct timespec where the generic variant uses fifth parameter.
4436      */
4437 #if defined(TARGET_S390X)
4438         ret = do_semtimedop(first, ptr, second, third);
4439 #else
4440         ret = do_semtimedop(first, ptr, second, fifth);
4441 #endif
4442         break;
4443 
4444     case IPCOP_semget:
4445         ret = get_errno(semget(first, second, third));
4446         break;
4447 
4448     case IPCOP_semctl: {
4449         /* The semun argument to semctl is passed by value, so dereference the
4450          * ptr argument. */
4451         abi_ulong atptr;
4452         get_user_ual(atptr, ptr);
4453         ret = do_semctl(first, second, third, atptr);
4454         break;
4455     }
4456 
4457     case IPCOP_msgget:
4458         ret = get_errno(msgget(first, second));
4459         break;
4460 
4461     case IPCOP_msgsnd:
4462         ret = do_msgsnd(first, ptr, second, third);
4463         break;
4464 
4465     case IPCOP_msgctl:
4466         ret = do_msgctl(first, second, ptr);
4467         break;
4468 
4469     case IPCOP_msgrcv:
4470         switch (version) {
4471         case 0:
4472             {
4473                 struct target_ipc_kludge {
4474                     abi_long msgp;
4475                     abi_long msgtyp;
4476                 } *tmp;
4477 
4478                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4479                     ret = -TARGET_EFAULT;
4480                     break;
4481                 }
4482 
4483                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4484 
4485                 unlock_user_struct(tmp, ptr, 0);
4486                 break;
4487             }
4488         default:
4489             ret = do_msgrcv(first, ptr, second, fifth, third);
4490         }
4491         break;
4492 
4493     case IPCOP_shmat:
4494         switch (version) {
4495         default:
4496         {
4497             abi_ulong raddr;
4498             raddr = do_shmat(cpu_env, first, ptr, second);
4499             if (is_error(raddr))
4500                 return get_errno(raddr);
4501             if (put_user_ual(raddr, third))
4502                 return -TARGET_EFAULT;
4503             break;
4504         }
4505         case 1:
4506             ret = -TARGET_EINVAL;
4507             break;
4508         }
4509         break;
4510     case IPCOP_shmdt:
4511         ret = do_shmdt(ptr);
4512         break;
4513 
4514     case IPCOP_shmget:
4515         /* IPC_* flag values are the same on all linux platforms */
4516         ret = get_errno(shmget(first, second, third));
4517         break;
4518 
4519     /* IPC_* and SHM_* command values are the same on all linux platforms */
4520     case IPCOP_shmctl:
4521         ret = do_shmctl(first, second, ptr);
4522         break;
4523     default:
4524         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4525                       call, version);
4526         ret = -TARGET_ENOSYS;
4527         break;
4528     }
4529     return ret;
4530 }
4531 #endif
4532 
4533 /* kernel structure types definitions */
4534 
4535 #define STRUCT(name, ...) STRUCT_ ## name,
4536 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4537 enum {
4538 #include "syscall_types.h"
4539 STRUCT_MAX
4540 };
4541 #undef STRUCT
4542 #undef STRUCT_SPECIAL
4543 
4544 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4545 #define STRUCT_SPECIAL(name)
4546 #include "syscall_types.h"
4547 #undef STRUCT
4548 #undef STRUCT_SPECIAL
4549 
4550 #define MAX_STRUCT_SIZE 4096
4551 
4552 #ifdef CONFIG_FIEMAP
4553 /* So fiemap access checks don't overflow on 32 bit systems.
4554  * This is very slightly smaller than the limit imposed by
4555  * the underlying kernel.
4556  */
4557 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4558                             / sizeof(struct fiemap_extent))
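/*
 * Worked example (added): the bound above guarantees that the later
 * computation
 *     outbufsz = sizeof(*fm) + fm->fm_extent_count * sizeof(struct fiemap_extent)
 * cannot wrap around a 32-bit size, because fm_extent_count is capped at
 * (UINT_MAX - sizeof(struct fiemap)) / sizeof(struct fiemap_extent).
 */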
4559 
4560 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4561                                        int fd, int cmd, abi_long arg)
4562 {
4563     /* The parameter for this ioctl is a struct fiemap followed
4564      * by an array of struct fiemap_extent whose size is set
4565      * in fiemap->fm_extent_count. The array is filled in by the
4566      * ioctl.
4567      */
4568     int target_size_in, target_size_out;
4569     struct fiemap *fm;
4570     const argtype *arg_type = ie->arg_type;
4571     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4572     void *argptr, *p;
4573     abi_long ret;
4574     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4575     uint32_t outbufsz;
4576     int free_fm = 0;
4577 
4578     assert(arg_type[0] == TYPE_PTR);
4579     assert(ie->access == IOC_RW);
4580     arg_type++;
4581     target_size_in = thunk_type_size(arg_type, 0);
4582     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4583     if (!argptr) {
4584         return -TARGET_EFAULT;
4585     }
4586     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4587     unlock_user(argptr, arg, 0);
4588     fm = (struct fiemap *)buf_temp;
4589     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4590         return -TARGET_EINVAL;
4591     }
4592 
4593     outbufsz = sizeof (*fm) +
4594         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4595 
4596     if (outbufsz > MAX_STRUCT_SIZE) {
4597         /* We can't fit all the extents into the fixed size buffer.
4598          * Allocate one that is large enough and use it instead.
4599          */
4600         fm = g_try_malloc(outbufsz);
4601         if (!fm) {
4602             return -TARGET_ENOMEM;
4603         }
4604         memcpy(fm, buf_temp, sizeof(struct fiemap));
4605         free_fm = 1;
4606     }
4607     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4608     if (!is_error(ret)) {
4609         target_size_out = target_size_in;
4610         /* An extent_count of 0 means we were only counting the extents
4611          * so there are no structs to copy
4612          */
4613         if (fm->fm_extent_count != 0) {
4614             target_size_out += fm->fm_mapped_extents * extent_size;
4615         }
4616         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4617         if (!argptr) {
4618             ret = -TARGET_EFAULT;
4619         } else {
4620             /* Convert the struct fiemap */
4621             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4622             if (fm->fm_extent_count != 0) {
4623                 p = argptr + target_size_in;
4624                 /* ...and then all the struct fiemap_extents */
4625                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4626                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4627                                   THUNK_TARGET);
4628                     p += extent_size;
4629                 }
4630             }
4631             unlock_user(argptr, arg, target_size_out);
4632         }
4633     }
4634     if (free_fm) {
4635         g_free(fm);
4636     }
4637     return ret;
4638 }
4639 #endif
4640 
4641 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4642                                 int fd, int cmd, abi_long arg)
4643 {
4644     const argtype *arg_type = ie->arg_type;
4645     int target_size;
4646     void *argptr;
4647     int ret;
4648     struct ifconf *host_ifconf;
4649     uint32_t outbufsz;
4650     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4651     int target_ifreq_size;
4652     int nb_ifreq;
4653     int free_buf = 0;
4654     int i;
4655     int target_ifc_len;
4656     abi_long target_ifc_buf;
4657     int host_ifc_len;
4658     char *host_ifc_buf;
4659 
4660     assert(arg_type[0] == TYPE_PTR);
4661     assert(ie->access == IOC_RW);
4662 
4663     arg_type++;
4664     target_size = thunk_type_size(arg_type, 0);
4665 
4666     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4667     if (!argptr)
4668         return -TARGET_EFAULT;
4669     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4670     unlock_user(argptr, arg, 0);
4671 
4672     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4673     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4674     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4675 
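         /*
          * The target's struct ifreq (described by STRUCT_sockaddr_ifreq) may
          * differ in size from the host's struct ifreq, so the entry count is
          * derived from the target's ifc_len and the host buffer is sized as
          * nb_ifreq * sizeof(struct ifreq).
          */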
4676     if (target_ifc_buf != 0) {
4677         target_ifc_len = host_ifconf->ifc_len;
4678         nb_ifreq = target_ifc_len / target_ifreq_size;
4679         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4680 
4681         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4682         if (outbufsz > MAX_STRUCT_SIZE) {
4683             /*
4684              * We can't fit all the ifreq entries into the fixed-size buffer.
4685              * Allocate one that is large enough and use it instead.
4686              */
4687             host_ifconf = g_try_malloc(outbufsz);
4688             if (!host_ifconf) {
4689                 return -TARGET_ENOMEM;
4690             }
4691             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4692             free_buf = 1;
4693         }
4694         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4695 
4696         host_ifconf->ifc_len = host_ifc_len;
4697     } else {
4698         host_ifc_buf = NULL;
4699     }
4700     host_ifconf->ifc_buf = host_ifc_buf;
4701 
4702     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4703     if (!is_error(ret)) {
4704         /* convert host ifc_len to target ifc_len */
4705 
4706         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4707         target_ifc_len = nb_ifreq * target_ifreq_size;
4708         host_ifconf->ifc_len = target_ifc_len;
4709 
4710         /* restore target ifc_buf */
4711 
4712         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4713 
4714         /* copy struct ifconf to target user */
4715 
4716         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4717         if (!argptr)
4718             return -TARGET_EFAULT;
4719         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4720         unlock_user(argptr, arg, target_size);
4721 
4722         if (target_ifc_buf != 0) {
4723             /* copy ifreq[] to target user */
4724             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
                 if (!argptr) {
                     if (free_buf) {
                         g_free(host_ifconf);
                     }
                     return -TARGET_EFAULT;
                 }
4725             for (i = 0; i < nb_ifreq; i++) {
4726                 thunk_convert(argptr + i * target_ifreq_size,
4727                               host_ifc_buf + i * sizeof(struct ifreq),
4728                               ifreq_arg_type, THUNK_TARGET);
4729             }
4730             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4731         }
4732     }
4733 
4734     if (free_buf) {
4735         g_free(host_ifconf);
4736     }
4737 
4738     return ret;
4739 }
4740 
4741 #if defined(CONFIG_USBFS)
4742 #if HOST_LONG_BITS > 64
4743 #error USBDEVFS thunks do not support >64 bit hosts yet.
4744 #endif
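     /*
      * Book-keeping for asynchronous USBDEVFS URBs: each guest submission is
      * wrapped in a live_urb that pairs the host usbdevfs_urb handed to the
      * kernel with the guest addresses needed to copy results back on reap.
      */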
4745 struct live_urb {
4746     uint64_t target_urb_adr;
4747     uint64_t target_buf_adr;
4748     char *target_buf_ptr;
4749     struct usbdevfs_urb host_urb;
4750 };
4751 
4752 static GHashTable *usbdevfs_urb_hashtable(void)
4753 {
4754     static GHashTable *urb_hashtable;
4755 
4756     if (!urb_hashtable) {
4757         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4758     }
4759     return urb_hashtable;
4760 }
4761 
4762 static void urb_hashtable_insert(struct live_urb *urb)
4763 {
4764     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4765     g_hash_table_insert(urb_hashtable, urb, urb);
4766 }
4767 
4768 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4769 {
4770     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4771     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4772 }
4773 
4774 static void urb_hashtable_remove(struct live_urb *urb)
4775 {
4776     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4777     g_hash_table_remove(urb_hashtable, urb);
4778 }
4779 
4780 static abi_long
4781 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4782                           int fd, int cmd, abi_long arg)
4783 {
4784     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4785     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4786     struct live_urb *lurb;
4787     void *argptr;
4788     uint64_t hurb;
4789     int target_size;
4790     uintptr_t target_urb_adr;
4791     abi_long ret;
4792 
4793     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4794 
4795     memset(buf_temp, 0, sizeof(uint64_t));
4796     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4797     if (is_error(ret)) {
4798         return ret;
4799     }
4800 
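         /*
          * The kernel hands back a pointer to the host usbdevfs_urb we
          * submitted; recover the enclosing live_urb (container_of style) to
          * get at the guest addresses saved at submit time.
          */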
4801     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4802     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4803     if (!lurb->target_urb_adr) {
4804         return -TARGET_EFAULT;
4805     }
4806     urb_hashtable_remove(lurb);
4807     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4808         lurb->host_urb.buffer_length);
4809     lurb->target_buf_ptr = NULL;
4810 
4811     /* restore the guest buffer pointer */
4812     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4813 
4814     /* update the guest urb struct */
4815     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4816     if (!argptr) {
4817         g_free(lurb);
4818         return -TARGET_EFAULT;
4819     }
4820     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4821     unlock_user(argptr, lurb->target_urb_adr, target_size);
4822 
4823     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4824     /* write back the urb handle */
4825     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4826     if (!argptr) {
4827         g_free(lurb);
4828         return -TARGET_EFAULT;
4829     }
4830 
4831     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4832     target_urb_adr = lurb->target_urb_adr;
4833     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4834     unlock_user(argptr, arg, target_size);
4835 
4836     g_free(lurb);
4837     return ret;
4838 }
4839 
4840 static abi_long
4841 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4842                              uint8_t *buf_temp __attribute__((unused)),
4843                              int fd, int cmd, abi_long arg)
4844 {
4845     struct live_urb *lurb;
4846 
4847     /* map target address back to host URB with metadata. */
4848     lurb = urb_hashtable_lookup(arg);
4849     if (!lurb) {
4850         return -TARGET_EFAULT;
4851     }
4852     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4853 }
4854 
4855 static abi_long
4856 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4857                             int fd, int cmd, abi_long arg)
4858 {
4859     const argtype *arg_type = ie->arg_type;
4860     int target_size;
4861     abi_long ret;
4862     void *argptr;
4863     int rw_dir;
4864     struct live_urb *lurb;
4865 
4866     /*
4867      * Each submitted URB needs to map to a unique ID for the
4868      * kernel, and that unique ID needs to be a pointer to
4869      * host memory.  Hence, we need to malloc for each URB.
4870      * Isochronous transfers have a variable length struct.
4871      */
4872     arg_type++;
4873     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4874 
4875     /* construct host copy of urb and metadata */
4876     lurb = g_try_malloc0(sizeof(struct live_urb));
4877     if (!lurb) {
4878         return -TARGET_ENOMEM;
4879     }
4880 
4881     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4882     if (!argptr) {
4883         g_free(lurb);
4884         return -TARGET_EFAULT;
4885     }
4886     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4887     unlock_user(argptr, arg, 0);
4888 
4889     lurb->target_urb_adr = arg;
4890     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4891 
4892     /* buffer space used depends on endpoint type so lock the entire buffer */
4893     /* control type urbs should check the buffer contents for true direction */
4894     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4895     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4896         lurb->host_urb.buffer_length, 1);
4897     if (lurb->target_buf_ptr == NULL) {
4898         g_free(lurb);
4899         return -TARGET_EFAULT;
4900     }
4901 
4902     /* update buffer pointer in host copy */
4903     lurb->host_urb.buffer = lurb->target_buf_ptr;
4904 
4905     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4906     if (is_error(ret)) {
4907         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4908         g_free(lurb);
4909     } else {
4910         urb_hashtable_insert(lurb);
4911     }
4912 
4913     return ret;
4914 }
4915 #endif /* CONFIG_USBFS */
4916 
4917 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4918                             int cmd, abi_long arg)
4919 {
4920     void *argptr;
4921     struct dm_ioctl *host_dm;
4922     abi_long guest_data;
4923     uint32_t guest_data_size;
4924     int target_size;
4925     const argtype *arg_type = ie->arg_type;
4926     abi_long ret;
4927     void *big_buf = NULL;
4928     char *host_data;
4929 
4930     arg_type++;
4931     target_size = thunk_type_size(arg_type, 0);
4932     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4933     if (!argptr) {
4934         ret = -TARGET_EFAULT;
4935         goto out;
4936     }
4937     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4938     unlock_user(argptr, arg, 0);
4939 
4940     /* buf_temp is too small, so fetch things into a bigger buffer */
4941     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4942     memcpy(big_buf, buf_temp, target_size);
4943     buf_temp = big_buf;
4944     host_dm = big_buf;
4945 
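         /*
          * struct dm_ioctl is a fixed header followed by a variable payload:
          * data_start is the payload offset from the start of the struct and
          * data_size the total size, so the guest payload lives at
          * arg + data_start and spans data_size - data_start bytes.
          */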
4946     guest_data = arg + host_dm->data_start;
4947     if ((guest_data - arg) < 0) {
4948         ret = -TARGET_EINVAL;
4949         goto out;
4950     }
4951     guest_data_size = host_dm->data_size - host_dm->data_start;
4952     host_data = (char*)host_dm + host_dm->data_start;
4953 
4954     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4955     if (!argptr) {
4956         ret = -TARGET_EFAULT;
4957         goto out;
4958     }
4959 
4960     switch (ie->host_cmd) {
4961     case DM_REMOVE_ALL:
4962     case DM_LIST_DEVICES:
4963     case DM_DEV_CREATE:
4964     case DM_DEV_REMOVE:
4965     case DM_DEV_SUSPEND:
4966     case DM_DEV_STATUS:
4967     case DM_DEV_WAIT:
4968     case DM_TABLE_STATUS:
4969     case DM_TABLE_CLEAR:
4970     case DM_TABLE_DEPS:
4971     case DM_LIST_VERSIONS:
4972         /* no input data */
4973         break;
4974     case DM_DEV_RENAME:
4975     case DM_DEV_SET_GEOMETRY:
4976         /* data contains only strings */
4977         memcpy(host_data, argptr, guest_data_size);
4978         break;
4979     case DM_TARGET_MSG:
4980         memcpy(host_data, argptr, guest_data_size);
4981         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4982         break;
4983     case DM_TABLE_LOAD:
4984     {
4985         void *gspec = argptr;
4986         void *cur_data = host_data;
4987         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4988         int spec_size = thunk_type_size(arg_type, 0);
4989         int i;
4990 
4991         for (i = 0; i < host_dm->target_count; i++) {
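             /*
              * Repack each dm_target_spec for the host: convert the fixed
              * part with the thunk, append its NUL-terminated parameter
              * string, and recompute the 'next' offsets since host and
              * target struct sizes may differ.
              */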
4992             struct dm_target_spec *spec = cur_data;
4993             uint32_t next;
4994             int slen;
4995 
4996             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4997             slen = strlen((char*)gspec + spec_size) + 1;
4998             next = spec->next;
4999             spec->next = sizeof(*spec) + slen;
5000             strcpy((char*)&spec[1], gspec + spec_size);
5001             gspec += next;
5002             cur_data += spec->next;
5003         }
5004         break;
5005     }
5006     default:
5007         ret = -TARGET_EINVAL;
5008         unlock_user(argptr, guest_data, 0);
5009         goto out;
5010     }
5011     unlock_user(argptr, guest_data, 0);
5012 
5013     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5014     if (!is_error(ret)) {
5015         guest_data = arg + host_dm->data_start;
5016         guest_data_size = host_dm->data_size - host_dm->data_start;
5017         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5018         switch (ie->host_cmd) {
5019         case DM_REMOVE_ALL:
5020         case DM_DEV_CREATE:
5021         case DM_DEV_REMOVE:
5022         case DM_DEV_RENAME:
5023         case DM_DEV_SUSPEND:
5024         case DM_DEV_STATUS:
5025         case DM_TABLE_LOAD:
5026         case DM_TABLE_CLEAR:
5027         case DM_TARGET_MSG:
5028         case DM_DEV_SET_GEOMETRY:
5029             /* no return data */
5030             break;
5031         case DM_LIST_DEVICES:
5032         {
5033             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5034             uint32_t remaining_data = guest_data_size;
5035             void *cur_data = argptr;
5036             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5037             int nl_size = 12; /* can't use thunk_size due to alignment */
5038 
5039             while (1) {
5040                 uint32_t next = nl->next;
5041                 if (next) {
5042                     nl->next = nl_size + (strlen(nl->name) + 1);
5043                 }
5044                 if (remaining_data < nl->next) {
5045                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5046                     break;
5047                 }
5048                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5049                 strcpy(cur_data + nl_size, nl->name);
5050                 cur_data += nl->next;
5051                 remaining_data -= nl->next;
5052                 if (!next) {
5053                     break;
5054                 }
5055                 nl = (void*)nl + next;
5056             }
5057             break;
5058         }
5059         case DM_DEV_WAIT:
5060         case DM_TABLE_STATUS:
5061         {
5062             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5063             void *cur_data = argptr;
5064             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5065             int spec_size = thunk_type_size(arg_type, 0);
5066             int i;
5067 
5068             for (i = 0; i < host_dm->target_count; i++) {
5069                 uint32_t next = spec->next;
5070                 int slen = strlen((char*)&spec[1]) + 1;
5071                 spec->next = (cur_data - argptr) + spec_size + slen;
5072                 if (guest_data_size < spec->next) {
5073                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5074                     break;
5075                 }
5076                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5077                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5078                 cur_data = argptr + spec->next;
5079                 spec = (void*)host_dm + host_dm->data_start + next;
5080             }
5081             break;
5082         }
5083         case DM_TABLE_DEPS:
5084         {
5085             void *hdata = (void*)host_dm + host_dm->data_start;
5086             int count = *(uint32_t*)hdata;
5087             uint64_t *hdev = hdata + 8;
5088             uint64_t *gdev = argptr + 8;
5089             int i;
5090 
5091             *(uint32_t*)argptr = tswap32(count);
5092             for (i = 0; i < count; i++) {
5093                 *gdev = tswap64(*hdev);
5094                 gdev++;
5095                 hdev++;
5096             }
5097             break;
5098         }
5099         case DM_LIST_VERSIONS:
5100         {
5101             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5102             uint32_t remaining_data = guest_data_size;
5103             void *cur_data = argptr;
5104             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5105             int vers_size = thunk_type_size(arg_type, 0);
5106 
5107             while (1) {
5108                 uint32_t next = vers->next;
5109                 if (next) {
5110                     vers->next = vers_size + (strlen(vers->name) + 1);
5111                 }
5112                 if (remaining_data < vers->next) {
5113                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5114                     break;
5115                 }
5116                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5117                 strcpy(cur_data + vers_size, vers->name);
5118                 cur_data += vers->next;
5119                 remaining_data -= vers->next;
5120                 if (!next) {
5121                     break;
5122                 }
5123                 vers = (void*)vers + next;
5124             }
5125             break;
5126         }
5127         default:
5128             unlock_user(argptr, guest_data, 0);
5129             ret = -TARGET_EINVAL;
5130             goto out;
5131         }
5132         unlock_user(argptr, guest_data, guest_data_size);
5133 
5134         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5135         if (!argptr) {
5136             ret = -TARGET_EFAULT;
5137             goto out;
5138         }
5139         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5140         unlock_user(argptr, arg, target_size);
5141     }
5142 out:
5143     g_free(big_buf);
5144     return ret;
5145 }
5146 
5147 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5148                                int cmd, abi_long arg)
5149 {
5150     void *argptr;
5151     int target_size;
5152     const argtype *arg_type = ie->arg_type;
5153     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5154     abi_long ret;
5155 
5156     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5157     struct blkpg_partition host_part;
5158 
5159     /* Read and convert blkpg */
5160     arg_type++;
5161     target_size = thunk_type_size(arg_type, 0);
5162     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5163     if (!argptr) {
5164         ret = -TARGET_EFAULT;
5165         goto out;
5166     }
5167     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5168     unlock_user(argptr, arg, 0);
5169 
5170     switch (host_blkpg->op) {
5171     case BLKPG_ADD_PARTITION:
5172     case BLKPG_DEL_PARTITION:
5173         /* payload is struct blkpg_partition */
5174         break;
5175     default:
5176         /* Unknown opcode */
5177         ret = -TARGET_EINVAL;
5178         goto out;
5179     }
5180 
5181     /* Read and convert blkpg->data */
5182     arg = (abi_long)(uintptr_t)host_blkpg->data;
5183     target_size = thunk_type_size(part_arg_type, 0);
5184     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5185     if (!argptr) {
5186         ret = -TARGET_EFAULT;
5187         goto out;
5188     }
5189     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5190     unlock_user(argptr, arg, 0);
5191 
5192     /* Swizzle the data pointer to our local copy and call! */
5193     host_blkpg->data = &host_part;
5194     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5195 
5196 out:
5197     return ret;
5198 }
5199 
5200 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5201                                 int fd, int cmd, abi_long arg)
5202 {
5203     const argtype *arg_type = ie->arg_type;
5204     const StructEntry *se;
5205     const argtype *field_types;
5206     const int *dst_offsets, *src_offsets;
5207     int target_size;
5208     void *argptr;
5209     abi_ulong *target_rt_dev_ptr = NULL;
5210     unsigned long *host_rt_dev_ptr = NULL;
5211     abi_long ret;
5212     int i;
5213 
5214     assert(ie->access == IOC_W);
5215     assert(*arg_type == TYPE_PTR);
5216     arg_type++;
5217     assert(*arg_type == TYPE_STRUCT);
5218     target_size = thunk_type_size(arg_type, 0);
5219     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5220     if (!argptr) {
5221         return -TARGET_EFAULT;
5222     }
5223     arg_type++;
5224     assert(*arg_type == (int)STRUCT_rtentry);
5225     se = struct_entries + *arg_type++;
5226     assert(se->convert[0] == NULL);
5227     /* convert struct here to be able to catch rt_dev string */
5228     field_types = se->field_types;
5229     dst_offsets = se->field_offsets[THUNK_HOST];
5230     src_offsets = se->field_offsets[THUNK_TARGET];
5231     for (i = 0; i < se->nb_fields; i++) {
5232         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5233             assert(*field_types == TYPE_PTRVOID);
5234             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5235             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5236             if (*target_rt_dev_ptr != 0) {
5237                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5238                                                   tswapal(*target_rt_dev_ptr));
5239                 if (!*host_rt_dev_ptr) {
5240                     unlock_user(argptr, arg, 0);
5241                     return -TARGET_EFAULT;
5242                 }
5243             } else {
5244                 *host_rt_dev_ptr = 0;
5245             }
5246             field_types++;
5247             continue;
5248         }
5249         field_types = thunk_convert(buf_temp + dst_offsets[i],
5250                                     argptr + src_offsets[i],
5251                                     field_types, THUNK_HOST);
5252     }
5253     unlock_user(argptr, arg, 0);
5254 
5255     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5256 
5257     assert(host_rt_dev_ptr != NULL);
5258     assert(target_rt_dev_ptr != NULL);
5259     if (*host_rt_dev_ptr != 0) {
5260         unlock_user((void *)*host_rt_dev_ptr,
5261                     *target_rt_dev_ptr, 0);
5262     }
5263     return ret;
5264 }
5265 
5266 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5267                                      int fd, int cmd, abi_long arg)
5268 {
5269     int sig = target_to_host_signal(arg);
5270     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5271 }
5272 
5273 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5274                                     int fd, int cmd, abi_long arg)
5275 {
5276     struct timeval tv;
5277     abi_long ret;
5278 
5279     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5280     if (is_error(ret)) {
5281         return ret;
5282     }
5283 
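         /*
          * TARGET_SIOCGSTAMP_OLD returns the target's legacy struct timeval,
          * while the new command uses the 64-bit time layout, so pick the
          * copy-out helper according to the command the guest issued.
          */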
5284     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5285         if (copy_to_user_timeval(arg, &tv)) {
5286             return -TARGET_EFAULT;
5287         }
5288     } else {
5289         if (copy_to_user_timeval64(arg, &tv)) {
5290             return -TARGET_EFAULT;
5291         }
5292     }
5293 
5294     return ret;
5295 }
5296 
5297 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5298                                       int fd, int cmd, abi_long arg)
5299 {
5300     struct timespec ts;
5301     abi_long ret;
5302 
5303     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5304     if (is_error(ret)) {
5305         return ret;
5306     }
5307 
5308     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5309         if (host_to_target_timespec(arg, &ts)) {
5310             return -TARGET_EFAULT;
5311         }
5312     } else {
5313         if (host_to_target_timespec64(arg, &ts)) {
5314             return -TARGET_EFAULT;
5315         }
5316     }
5317 
5318     return ret;
5319 }
5320 
5321 #ifdef TIOCGPTPEER
5322 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5323                                      int fd, int cmd, abi_long arg)
5324 {
5325     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5326     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5327 }
5328 #endif
5329 
5330 #ifdef HAVE_DRM_H
5331 
5332 static void unlock_drm_version(struct drm_version *host_ver,
5333                                struct target_drm_version *target_ver,
5334                                bool copy)
5335 {
5336     unlock_user(host_ver->name, target_ver->name,
5337                                 copy ? host_ver->name_len : 0);
5338     unlock_user(host_ver->date, target_ver->date,
5339                                 copy ? host_ver->date_len : 0);
5340     unlock_user(host_ver->desc, target_ver->desc,
5341                                 copy ? host_ver->desc_len : 0);
5342 }
5343 
5344 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5345                                           struct target_drm_version *target_ver)
5346 {
5347     memset(host_ver, 0, sizeof(*host_ver));
5348 
5349     __get_user(host_ver->name_len, &target_ver->name_len);
5350     if (host_ver->name_len) {
5351         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5352                                    target_ver->name_len, 0);
5353         if (!host_ver->name) {
5354             return -EFAULT;
5355         }
5356     }
5357 
5358     __get_user(host_ver->date_len, &target_ver->date_len);
5359     if (host_ver->date_len) {
5360         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5361                                    target_ver->date_len, 0);
5362         if (!host_ver->date) {
5363             goto err;
5364         }
5365     }
5366 
5367     __get_user(host_ver->desc_len, &target_ver->desc_len);
5368     if (host_ver->desc_len) {
5369         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5370                                    target_ver->desc_len, 0);
5371         if (!host_ver->desc) {
5372             goto err;
5373         }
5374     }
5375 
5376     return 0;
5377 err:
5378     unlock_drm_version(host_ver, target_ver, false);
5379     return -EFAULT;
5380 }
5381 
5382 static inline void host_to_target_drmversion(
5383                                           struct target_drm_version *target_ver,
5384                                           struct drm_version *host_ver)
5385 {
5386     __put_user(host_ver->version_major, &target_ver->version_major);
5387     __put_user(host_ver->version_minor, &target_ver->version_minor);
5388     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5389     __put_user(host_ver->name_len, &target_ver->name_len);
5390     __put_user(host_ver->date_len, &target_ver->date_len);
5391     __put_user(host_ver->desc_len, &target_ver->desc_len);
5392     unlock_drm_version(host_ver, target_ver, true);
5393 }
5394 
5395 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5396                              int fd, int cmd, abi_long arg)
5397 {
5398     struct drm_version *ver;
5399     struct target_drm_version *target_ver;
5400     abi_long ret;
5401 
5402     switch (ie->host_cmd) {
5403     case DRM_IOCTL_VERSION:
5404         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5405             return -TARGET_EFAULT;
5406         }
5407         ver = (struct drm_version *)buf_temp;
5408         ret = target_to_host_drmversion(ver, target_ver);
5409         if (!is_error(ret)) {
5410             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5411             if (is_error(ret)) {
5412                 unlock_drm_version(ver, target_ver, false);
5413             } else {
5414                 host_to_target_drmversion(target_ver, ver);
5415             }
5416         }
5417         unlock_user_struct(target_ver, arg, 0);
5418         return ret;
5419     }
5420     return -TARGET_ENOSYS;
5421 }
5422 
5423 #endif
5424 
5425 IOCTLEntry ioctl_entries[] = {
5426 #define IOCTL(cmd, access, ...) \
5427     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5428 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5429     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5430 #define IOCTL_IGNORE(cmd) \
5431     { TARGET_ ## cmd, 0, #cmd },
5432 #include "ioctls.h"
5433     { 0, 0, },
5434 };
5435 
5436 /* ??? Implement proper locking for ioctls.  */
5437 /* do_ioctl() must return target values and target errnos. */
5438 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5439 {
5440     const IOCTLEntry *ie;
5441     const argtype *arg_type;
5442     abi_long ret;
5443     uint8_t buf_temp[MAX_STRUCT_SIZE];
5444     int target_size;
5445     void *argptr;
5446 
5447     ie = ioctl_entries;
5448     for(;;) {
5449         if (ie->target_cmd == 0) {
5450             qemu_log_mask(
5451                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5452             return -TARGET_ENOSYS;
5453         }
5454         if (ie->target_cmd == cmd)
5455             break;
5456         ie++;
5457     }
5458     arg_type = ie->arg_type;
5459     if (ie->do_ioctl) {
5460         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5461     } else if (!ie->host_cmd) {
5462         /* Some architectures define BSD ioctls in their headers
5463            that are not implemented in Linux.  */
5464         return -TARGET_ENOSYS;
5465     }
5466 
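         /*
          * Generic path: no special handler is registered, so convert the
          * argument purely from its thunk type description and the declared
          * access direction (IOC_W: target to host before the call, IOC_R:
          * host to target after it, IOC_RW: both).
          */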
5467     switch(arg_type[0]) {
5468     case TYPE_NULL:
5469         /* no argument */
5470         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5471         break;
5472     case TYPE_PTRVOID:
5473     case TYPE_INT:
5474     case TYPE_LONG:
5475     case TYPE_ULONG:
5476         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5477         break;
5478     case TYPE_PTR:
5479         arg_type++;
5480         target_size = thunk_type_size(arg_type, 0);
5481         switch(ie->access) {
5482         case IOC_R:
5483             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5484             if (!is_error(ret)) {
5485                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5486                 if (!argptr)
5487                     return -TARGET_EFAULT;
5488                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5489                 unlock_user(argptr, arg, target_size);
5490             }
5491             break;
5492         case IOC_W:
5493             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5494             if (!argptr)
5495                 return -TARGET_EFAULT;
5496             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5497             unlock_user(argptr, arg, 0);
5498             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5499             break;
5500         default:
5501         case IOC_RW:
5502             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5503             if (!argptr)
5504                 return -TARGET_EFAULT;
5505             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5506             unlock_user(argptr, arg, 0);
5507             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5508             if (!is_error(ret)) {
5509                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5510                 if (!argptr)
5511                     return -TARGET_EFAULT;
5512                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5513                 unlock_user(argptr, arg, target_size);
5514             }
5515             break;
5516         }
5517         break;
5518     default:
5519         qemu_log_mask(LOG_UNIMP,
5520                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5521                       (long)cmd, arg_type[0]);
5522         ret = -TARGET_ENOSYS;
5523         break;
5524     }
5525     return ret;
5526 }
5527 
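     /*
      * Termios flag translation tables.  Each { target_mask, target_bits,
      * host_mask, host_bits } entry means: if the bits selected by
      * target_mask equal target_bits, set host_bits in the host value (and
      * the converse via host_mask/host_bits for the host-to-target
      * direction).  For example { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 }
      * maps a target CS8 character size to the host's CS8.
      */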
5528 static const bitmask_transtbl iflag_tbl[] = {
5529         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5530         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5531         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5532         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5533         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5534         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5535         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5536         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5537         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5538         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5539         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5540         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5541         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5542         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5543         { 0, 0, 0, 0 }
5544 };
5545 
5546 static const bitmask_transtbl oflag_tbl[] = {
5547 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5548 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5549 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5550 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5551 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5552 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5553 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5554 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5555 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5556 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5557 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5558 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5559 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5560 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5561 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5562 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5563 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5564 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5565 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5566 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5567 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5568 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5569 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5570 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5571 	{ 0, 0, 0, 0 }
5572 };
5573 
5574 static const bitmask_transtbl cflag_tbl[] = {
5575 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5576 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5577 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5578 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5579 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5580 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5581 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5582 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5583 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5584 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5585 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5586 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5587 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5588 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5589 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5590 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5591 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5592 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5593 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5594 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5595 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5596 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5597 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5598 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5599 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5600 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5601 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5602 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5603 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5604 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5605 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5606 	{ 0, 0, 0, 0 }
5607 };
5608 
5609 static const bitmask_transtbl lflag_tbl[] = {
5610 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5611 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5612 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5613 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5614 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5615 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5616 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5617 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5618 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5619 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5620 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5621 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5622 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5623 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5624 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5625 	{ 0, 0, 0, 0 }
5626 };
5627 
5628 static void target_to_host_termios (void *dst, const void *src)
5629 {
5630     struct host_termios *host = dst;
5631     const struct target_termios *target = src;
5632 
5633     host->c_iflag =
5634         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5635     host->c_oflag =
5636         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5637     host->c_cflag =
5638         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5639     host->c_lflag =
5640         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5641     host->c_line = target->c_line;
5642 
5643     memset(host->c_cc, 0, sizeof(host->c_cc));
5644     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5645     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5646     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5647     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5648     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5649     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5650     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5651     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5652     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5653     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5654     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5655     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5656     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5657     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5658     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5659     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5660     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5661 }
5662 
5663 static void host_to_target_termios (void *dst, const void *src)
5664 {
5665     struct target_termios *target = dst;
5666     const struct host_termios *host = src;
5667 
5668     target->c_iflag =
5669         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5670     target->c_oflag =
5671         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5672     target->c_cflag =
5673         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5674     target->c_lflag =
5675         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5676     target->c_line = host->c_line;
5677 
5678     memset(target->c_cc, 0, sizeof(target->c_cc));
5679     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5680     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5681     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5682     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5683     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5684     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5685     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5686     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5687     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5688     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5689     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5690     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5691     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5692     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5693     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5694     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5695     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5696 }
5697 
5698 static const StructEntry struct_termios_def = {
5699     .convert = { host_to_target_termios, target_to_host_termios },
5700     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5701     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5702 };
5703 
5704 static bitmask_transtbl mmap_flags_tbl[] = {
5705     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5706     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5707     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5708     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5709       MAP_ANONYMOUS, MAP_ANONYMOUS },
5710     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5711       MAP_GROWSDOWN, MAP_GROWSDOWN },
5712     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5713       MAP_DENYWRITE, MAP_DENYWRITE },
5714     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5715       MAP_EXECUTABLE, MAP_EXECUTABLE },
5716     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5717     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5718       MAP_NORESERVE, MAP_NORESERVE },
5719     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5720     /* MAP_STACK had been ignored by the kernel for quite some time.
5721        Recognize it for the target insofar as we do not want to pass
5722        it through to the host.  */
5723     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5724     { 0, 0, 0, 0 }
5725 };
5726 
5727 /*
5728  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5729  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5730  */
5731 #if defined(TARGET_I386)
5732 
5733 /* NOTE: there is really one LDT for all the threads */
5734 static uint8_t *ldt_table;
5735 
5736 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5737 {
5738     int size;
5739     void *p;
5740 
5741     if (!ldt_table)
5742         return 0;
5743     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5744     if (size > bytecount)
5745         size = bytecount;
5746     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5747     if (!p)
5748         return -TARGET_EFAULT;
5749     /* ??? Should this be byteswapped?  */
5750     memcpy(p, ldt_table, size);
5751     unlock_user(p, ptr, size);
5752     return size;
5753 }
5754 
5755 /* XXX: add locking support */
5756 static abi_long write_ldt(CPUX86State *env,
5757                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5758 {
5759     struct target_modify_ldt_ldt_s ldt_info;
5760     struct target_modify_ldt_ldt_s *target_ldt_info;
5761     int seg_32bit, contents, read_exec_only, limit_in_pages;
5762     int seg_not_present, useable, lm;
5763     uint32_t *lp, entry_1, entry_2;
5764 
5765     if (bytecount != sizeof(ldt_info))
5766         return -TARGET_EINVAL;
5767     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5768         return -TARGET_EFAULT;
5769     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5770     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5771     ldt_info.limit = tswap32(target_ldt_info->limit);
5772     ldt_info.flags = tswap32(target_ldt_info->flags);
5773     unlock_user_struct(target_ldt_info, ptr, 0);
5774 
5775     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5776         return -TARGET_EINVAL;
5777     seg_32bit = ldt_info.flags & 1;
5778     contents = (ldt_info.flags >> 1) & 3;
5779     read_exec_only = (ldt_info.flags >> 3) & 1;
5780     limit_in_pages = (ldt_info.flags >> 4) & 1;
5781     seg_not_present = (ldt_info.flags >> 5) & 1;
5782     useable = (ldt_info.flags >> 6) & 1;
5783 #ifdef TARGET_ABI32
5784     lm = 0;
5785 #else
5786     lm = (ldt_info.flags >> 7) & 1;
5787 #endif
5788     if (contents == 3) {
5789         if (oldmode)
5790             return -TARGET_EINVAL;
5791         if (seg_not_present == 0)
5792             return -TARGET_EINVAL;
5793     }
5794     /* allocate the LDT */
5795     if (!ldt_table) {
5796         env->ldt.base = target_mmap(0,
5797                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5798                                     PROT_READ|PROT_WRITE,
5799                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5800         if (env->ldt.base == -1)
5801             return -TARGET_ENOMEM;
5802         memset(g2h(env->ldt.base), 0,
5803                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5804         env->ldt.limit = 0xffff;
5805         ldt_table = g2h(env->ldt.base);
5806     }
5807 
5808     /* NOTE: same code as Linux kernel */
5809     /* Allow LDTs to be cleared by the user. */
5810     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5811         if (oldmode ||
5812             (contents == 0		&&
5813              read_exec_only == 1	&&
5814              seg_32bit == 0		&&
5815              limit_in_pages == 0	&&
5816              seg_not_present == 1	&&
5817              useable == 0 )) {
5818             entry_1 = 0;
5819             entry_2 = 0;
5820             goto install;
5821         }
5822     }
5823 
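         /*
          * Pack the request into the two 32-bit words of an x86 segment
          * descriptor: entry_1 holds base[15:0] and limit[15:0]; entry_2
          * holds base[31:24] and base[23:16], limit[19:16], and the
          * access/attribute bits (type, DPL=3, present, AVL, L, D/B and
          * granularity), mirroring the layout the kernel's modify_ldt builds.
          */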
5824     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5825         (ldt_info.limit & 0x0ffff);
5826     entry_2 = (ldt_info.base_addr & 0xff000000) |
5827         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5828         (ldt_info.limit & 0xf0000) |
5829         ((read_exec_only ^ 1) << 9) |
5830         (contents << 10) |
5831         ((seg_not_present ^ 1) << 15) |
5832         (seg_32bit << 22) |
5833         (limit_in_pages << 23) |
5834         (lm << 21) |
5835         0x7000;
5836     if (!oldmode)
5837         entry_2 |= (useable << 20);
5838 
5839     /* Install the new entry ...  */
5840 install:
5841     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5842     lp[0] = tswap32(entry_1);
5843     lp[1] = tswap32(entry_2);
5844     return 0;
5845 }
5846 
5847 /* specific and weird i386 syscalls */
5848 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5849                               unsigned long bytecount)
5850 {
5851     abi_long ret;
5852 
5853     switch (func) {
5854     case 0:
5855         ret = read_ldt(ptr, bytecount);
5856         break;
5857     case 1:
5858         ret = write_ldt(env, ptr, bytecount, 1);
5859         break;
5860     case 0x11:
5861         ret = write_ldt(env, ptr, bytecount, 0);
5862         break;
5863     default:
5864         ret = -TARGET_ENOSYS;
5865         break;
5866     }
5867     return ret;
5868 }
5869 
5870 #if defined(TARGET_ABI32)
5871 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5872 {
5873     uint64_t *gdt_table = g2h(env->gdt.base);
5874     struct target_modify_ldt_ldt_s ldt_info;
5875     struct target_modify_ldt_ldt_s *target_ldt_info;
5876     int seg_32bit, contents, read_exec_only, limit_in_pages;
5877     int seg_not_present, useable, lm;
5878     uint32_t *lp, entry_1, entry_2;
5879     int i;
5880 
5881     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5882     if (!target_ldt_info)
5883         return -TARGET_EFAULT;
5884     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5885     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5886     ldt_info.limit = tswap32(target_ldt_info->limit);
5887     ldt_info.flags = tswap32(target_ldt_info->flags);
5888     if (ldt_info.entry_number == -1) {
5889         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5890             if (gdt_table[i] == 0) {
5891                 ldt_info.entry_number = i;
5892                 target_ldt_info->entry_number = tswap32(i);
5893                 break;
5894             }
5895         }
5896     }
5897     unlock_user_struct(target_ldt_info, ptr, 1);
5898 
5899     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5900         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5901            return -TARGET_EINVAL;
5902     seg_32bit = ldt_info.flags & 1;
5903     contents = (ldt_info.flags >> 1) & 3;
5904     read_exec_only = (ldt_info.flags >> 3) & 1;
5905     limit_in_pages = (ldt_info.flags >> 4) & 1;
5906     seg_not_present = (ldt_info.flags >> 5) & 1;
5907     useable = (ldt_info.flags >> 6) & 1;
5908 #ifdef TARGET_ABI32
5909     lm = 0;
5910 #else
5911     lm = (ldt_info.flags >> 7) & 1;
5912 #endif
5913 
5914     if (contents == 3) {
5915         if (seg_not_present == 0)
5916             return -TARGET_EINVAL;
5917     }
5918 
5919     /* NOTE: same code as Linux kernel */
5920     /* Allow LDTs to be cleared by the user. */
5921     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5922         if ((contents == 0             &&
5923              read_exec_only == 1       &&
5924              seg_32bit == 0            &&
5925              limit_in_pages == 0       &&
5926              seg_not_present == 1      &&
5927              useable == 0 )) {
5928             entry_1 = 0;
5929             entry_2 = 0;
5930             goto install;
5931         }
5932     }
5933 
5934     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5935         (ldt_info.limit & 0x0ffff);
5936     entry_2 = (ldt_info.base_addr & 0xff000000) |
5937         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5938         (ldt_info.limit & 0xf0000) |
5939         ((read_exec_only ^ 1) << 9) |
5940         (contents << 10) |
5941         ((seg_not_present ^ 1) << 15) |
5942         (seg_32bit << 22) |
5943         (limit_in_pages << 23) |
5944         (useable << 20) |
5945         (lm << 21) |
5946         0x7000;
5947 
5948     /* Install the new entry ...  */
5949 install:
5950     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5951     lp[0] = tswap32(entry_1);
5952     lp[1] = tswap32(entry_2);
5953     return 0;
5954 }
5955 
5956 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5957 {
5958     struct target_modify_ldt_ldt_s *target_ldt_info;
5959     uint64_t *gdt_table = g2h(env->gdt.base);
5960     uint32_t base_addr, limit, flags;
5961     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5962     int seg_not_present, useable, lm;
5963     uint32_t *lp, entry_1, entry_2;
5964 
5965     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5966     if (!target_ldt_info)
5967         return -TARGET_EFAULT;
5968     idx = tswap32(target_ldt_info->entry_number);
5969     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5970         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5971         unlock_user_struct(target_ldt_info, ptr, 1);
5972         return -TARGET_EINVAL;
5973     }
5974     lp = (uint32_t *)(gdt_table + idx);
5975     entry_1 = tswap32(lp[0]);
5976     entry_2 = tswap32(lp[1]);
5977 
5978     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5979     contents = (entry_2 >> 10) & 3;
5980     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5981     seg_32bit = (entry_2 >> 22) & 1;
5982     limit_in_pages = (entry_2 >> 23) & 1;
5983     useable = (entry_2 >> 20) & 1;
5984 #ifdef TARGET_ABI32
5985     lm = 0;
5986 #else
5987     lm = (entry_2 >> 21) & 1;
5988 #endif
5989     flags = (seg_32bit << 0) | (contents << 1) |
5990         (read_exec_only << 3) | (limit_in_pages << 4) |
5991         (seg_not_present << 5) | (useable << 6) | (lm << 7);
5992     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5993     base_addr = (entry_1 >> 16) |
5994         (entry_2 & 0xff000000) |
5995         ((entry_2 & 0xff) << 16);
5996     target_ldt_info->base_addr = tswapal(base_addr);
5997     target_ldt_info->limit = tswap32(limit);
5998     target_ldt_info->flags = tswap32(flags);
5999     unlock_user_struct(target_ldt_info, ptr, 1);
6000     return 0;
6001 }
6002 
6003 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6004 {
6005     return -TARGET_ENOSYS;
6006 }
6007 #else
6008 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6009 {
6010     abi_long ret = 0;
6011     abi_ulong val;
6012     int idx;
6013 
6014     switch(code) {
6015     case TARGET_ARCH_SET_GS:
6016     case TARGET_ARCH_SET_FS:
6017         if (code == TARGET_ARCH_SET_GS)
6018             idx = R_GS;
6019         else
6020             idx = R_FS;
6021         cpu_x86_load_seg(env, idx, 0);
6022         env->segs[idx].base = addr;
6023         break;
6024     case TARGET_ARCH_GET_GS:
6025     case TARGET_ARCH_GET_FS:
6026         if (code == TARGET_ARCH_GET_GS)
6027             idx = R_GS;
6028         else
6029             idx = R_FS;
6030         val = env->segs[idx].base;
6031         if (put_user(val, addr, abi_ulong))
6032             ret = -TARGET_EFAULT;
6033         break;
6034     default:
6035         ret = -TARGET_EINVAL;
6036         break;
6037     }
6038     return ret;
6039 }
6040 #endif /* defined(TARGET_ABI32) */
6041 
6042 #endif /* defined(TARGET_I386) */
6043 
6044 #define NEW_STACK_SIZE 0x40000
6045 
6046 
6047 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6048 typedef struct {
6049     CPUArchState *env;
6050     pthread_mutex_t mutex;
6051     pthread_cond_t cond;
6052     pthread_t thread;
6053     uint32_t tid;
6054     abi_ulong child_tidptr;
6055     abi_ulong parent_tidptr;
6056     sigset_t sigmask;
6057 } new_thread_info;
6058 
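     /*
      * clone_func() is the start routine of each new guest thread: it
      * registers the thread with RCU/TCG, publishes its tid through the
      * new_thread_info above, signals the waiting parent and then enters
      * cpu_loop(), never returning.
      */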
6059 static void *clone_func(void *arg)
6060 {
6061     new_thread_info *info = arg;
6062     CPUArchState *env;
6063     CPUState *cpu;
6064     TaskState *ts;
6065 
6066     rcu_register_thread();
6067     tcg_register_thread();
6068     env = info->env;
6069     cpu = env_cpu(env);
6070     thread_cpu = cpu;
6071     ts = (TaskState *)cpu->opaque;
6072     info->tid = sys_gettid();
6073     task_settid(ts);
6074     if (info->child_tidptr)
6075         put_user_u32(info->tid, info->child_tidptr);
6076     if (info->parent_tidptr)
6077         put_user_u32(info->tid, info->parent_tidptr);
6078     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6079     /* Enable signals.  */
6080     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6081     /* Signal to the parent that we're ready.  */
6082     pthread_mutex_lock(&info->mutex);
6083     pthread_cond_broadcast(&info->cond);
6084     pthread_mutex_unlock(&info->mutex);
6085     /* Wait until the parent has finished initializing the tls state.  */
6086     pthread_mutex_lock(&clone_lock);
6087     pthread_mutex_unlock(&clone_lock);
6088     cpu_loop(env);
6089     /* never exits */
6090     return NULL;
6091 }
6092 
6093 /* do_fork() must return host values and target errnos (unlike most
6094    do_*() functions). */
6095 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6096                    abi_ulong parent_tidptr, target_ulong newtls,
6097                    abi_ulong child_tidptr)
6098 {
6099     CPUState *cpu = env_cpu(env);
6100     int ret;
6101     TaskState *ts;
6102     CPUState *new_cpu;
6103     CPUArchState *new_env;
6104     sigset_t sigmask;
6105 
6106     flags &= ~CLONE_IGNORED_FLAGS;
6107 
6108     /* Emulate vfork() with fork() */
6109     if (flags & CLONE_VFORK)
6110         flags &= ~(CLONE_VFORK | CLONE_VM);
6111 
6112     if (flags & CLONE_VM) {
6113         TaskState *parent_ts = (TaskState *)cpu->opaque;
6114         new_thread_info info;
6115         pthread_attr_t attr;
6116 
6117         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6118             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6119             return -TARGET_EINVAL;
6120         }
6121 
6122         ts = g_new0(TaskState, 1);
6123         init_task_state(ts);
6124 
6125         /* Grab a mutex so that thread setup appears atomic.  */
6126         pthread_mutex_lock(&clone_lock);
6127 
6128         /* we create a new CPU instance. */
6129         new_env = cpu_copy(env);
6130         /* Init regs that differ from the parent.  */
6131         cpu_clone_regs_child(new_env, newsp, flags);
6132         cpu_clone_regs_parent(env, flags);
6133         new_cpu = env_cpu(new_env);
6134         new_cpu->opaque = ts;
6135         ts->bprm = parent_ts->bprm;
6136         ts->info = parent_ts->info;
6137         ts->signal_mask = parent_ts->signal_mask;
6138 
6139         if (flags & CLONE_CHILD_CLEARTID) {
6140             ts->child_tidptr = child_tidptr;
6141         }
6142 
6143         if (flags & CLONE_SETTLS) {
6144             cpu_set_tls (new_env, newtls);
6145         }
6146 
6147         memset(&info, 0, sizeof(info));
6148         pthread_mutex_init(&info.mutex, NULL);
6149         pthread_mutex_lock(&info.mutex);
6150         pthread_cond_init(&info.cond, NULL);
6151         info.env = new_env;
6152         if (flags & CLONE_CHILD_SETTID) {
6153             info.child_tidptr = child_tidptr;
6154         }
6155         if (flags & CLONE_PARENT_SETTID) {
6156             info.parent_tidptr = parent_tidptr;
6157         }
6158 
6159         ret = pthread_attr_init(&attr);
6160         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6161         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6162         /* It is not safe to deliver signals until the child has finished
6163            initializing, so temporarily block all signals.  */
6164         sigfillset(&sigmask);
6165         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6166         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6167 
6168         /* If this is our first additional thread, we need to ensure we
6169          * generate code for parallel execution and flush old translations.
6170          */
6171         if (!parallel_cpus) {
6172             parallel_cpus = true;
6173             tb_flush(cpu);
6174         }
6175 
6176         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6177         /* TODO: Free new CPU state if thread creation failed.  */
6178 
6179         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6180         pthread_attr_destroy(&attr);
6181         if (ret == 0) {
6182             /* Wait for the child to initialize.  */
6183             pthread_cond_wait(&info.cond, &info.mutex);
6184             ret = info.tid;
6185         } else {
6186             ret = -1;
6187         }
6188         pthread_mutex_unlock(&info.mutex);
6189         pthread_cond_destroy(&info.cond);
6190         pthread_mutex_destroy(&info.mutex);
6191         pthread_mutex_unlock(&clone_lock);
6192     } else {
6193         /* if CLONE_VM is not set, we consider it a fork */
6194         if (flags & CLONE_INVALID_FORK_FLAGS) {
6195             return -TARGET_EINVAL;
6196         }
6197 
6198         /* We can't support custom termination signals */
6199         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6200             return -TARGET_EINVAL;
6201         }
6202 
6203         if (block_signals()) {
6204             return -TARGET_ERESTARTSYS;
6205         }
6206 
6207         fork_start();
6208         ret = fork();
6209         if (ret == 0) {
6210             /* Child Process.  */
6211             cpu_clone_regs_child(env, newsp, flags);
6212             fork_end(1);
6213             /* There is a race condition here.  The parent process could
6214                theoretically read the TID in the child process before the child
6215                tid is set.  This would require using either ptrace
6216                (not implemented) or having *_tidptr point at a shared memory
6217                mapping.  We can't repeat the spinlock hack used above because
6218                the child process gets its own copy of the lock.  */
6219             if (flags & CLONE_CHILD_SETTID)
6220                 put_user_u32(sys_gettid(), child_tidptr);
6221             if (flags & CLONE_PARENT_SETTID)
6222                 put_user_u32(sys_gettid(), parent_tidptr);
6223             ts = (TaskState *)cpu->opaque;
6224             if (flags & CLONE_SETTLS)
6225                 cpu_set_tls (env, newtls);
6226             if (flags & CLONE_CHILD_CLEARTID)
6227                 ts->child_tidptr = child_tidptr;
6228         } else {
6229             cpu_clone_regs_parent(env, flags);
6230             fork_end(0);
6231         }
6232     }
6233     return ret;
6234 }
6235 
6236 /* Warning: doesn't handle Linux-specific flags... */
6237 static int target_to_host_fcntl_cmd(int cmd)
6238 {
6239     int ret;
6240 
6241     switch(cmd) {
6242     case TARGET_F_DUPFD:
6243     case TARGET_F_GETFD:
6244     case TARGET_F_SETFD:
6245     case TARGET_F_GETFL:
6246     case TARGET_F_SETFL:
6247     case TARGET_F_OFD_GETLK:
6248     case TARGET_F_OFD_SETLK:
6249     case TARGET_F_OFD_SETLKW:
6250         ret = cmd;
6251         break;
6252     case TARGET_F_GETLK:
6253         ret = F_GETLK64;
6254         break;
6255     case TARGET_F_SETLK:
6256         ret = F_SETLK64;
6257         break;
6258     case TARGET_F_SETLKW:
6259         ret = F_SETLKW64;
6260         break;
6261     case TARGET_F_GETOWN:
6262         ret = F_GETOWN;
6263         break;
6264     case TARGET_F_SETOWN:
6265         ret = F_SETOWN;
6266         break;
6267     case TARGET_F_GETSIG:
6268         ret = F_GETSIG;
6269         break;
6270     case TARGET_F_SETSIG:
6271         ret = F_SETSIG;
6272         break;
6273 #if TARGET_ABI_BITS == 32
6274     case TARGET_F_GETLK64:
6275         ret = F_GETLK64;
6276         break;
6277     case TARGET_F_SETLK64:
6278         ret = F_SETLK64;
6279         break;
6280     case TARGET_F_SETLKW64:
6281         ret = F_SETLKW64;
6282         break;
6283 #endif
6284     case TARGET_F_SETLEASE:
6285         ret = F_SETLEASE;
6286         break;
6287     case TARGET_F_GETLEASE:
6288         ret = F_GETLEASE;
6289         break;
6290 #ifdef F_DUPFD_CLOEXEC
6291     case TARGET_F_DUPFD_CLOEXEC:
6292         ret = F_DUPFD_CLOEXEC;
6293         break;
6294 #endif
6295     case TARGET_F_NOTIFY:
6296         ret = F_NOTIFY;
6297         break;
6298 #ifdef F_GETOWN_EX
6299     case TARGET_F_GETOWN_EX:
6300         ret = F_GETOWN_EX;
6301         break;
6302 #endif
6303 #ifdef F_SETOWN_EX
6304     case TARGET_F_SETOWN_EX:
6305         ret = F_SETOWN_EX;
6306         break;
6307 #endif
6308 #ifdef F_SETPIPE_SZ
6309     case TARGET_F_SETPIPE_SZ:
6310         ret = F_SETPIPE_SZ;
6311         break;
6312     case TARGET_F_GETPIPE_SZ:
6313         ret = F_GETPIPE_SZ;
6314         break;
6315 #endif
6316     default:
6317         ret = -TARGET_EINVAL;
6318         break;
6319     }
6320 
6321 #if defined(__powerpc64__)
6322     /* On PPC64, the glibc headers define F_*LK* to 12, 13 and 14, values
6323      * that are not supported by the kernel. The glibc fcntl wrapper adjusts
6324      * them to 5, 6 and 7 before making the syscall. Since we make the
6325      * syscall directly, adjust to what the kernel supports.
6326      */
6327     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6328         ret -= F_GETLK64 - 5;
6329     }
6330 #endif
6331 
6332     return ret;
6333 }
6334 
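/*
 * X-macro listing the flock l_type values handled by the converters
 * below; TRANSTBL_CONVERT is defined differently for each direction
 * of the translation.
 */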
6335 #define FLOCK_TRANSTBL \
6336     switch (type) { \
6337     TRANSTBL_CONVERT(F_RDLCK); \
6338     TRANSTBL_CONVERT(F_WRLCK); \
6339     TRANSTBL_CONVERT(F_UNLCK); \
6340     TRANSTBL_CONVERT(F_EXLCK); \
6341     TRANSTBL_CONVERT(F_SHLCK); \
6342     }
6343 
6344 static int target_to_host_flock(int type)
6345 {
6346 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6347     FLOCK_TRANSTBL
6348 #undef  TRANSTBL_CONVERT
6349     return -TARGET_EINVAL;
6350 }
6351 
6352 static int host_to_target_flock(int type)
6353 {
6354 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6355     FLOCK_TRANSTBL
6356 #undef  TRANSTBL_CONVERT
6357     /* if we don't know how to convert the value coming from the host,
6358      * we copy it to the target field as-is
6359      */
6360     return type;
6361 }
6362 
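/*
 * Copy a struct flock from guest memory into a host struct flock64,
 * translating l_type and byte-swapping the remaining fields.
 */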
6363 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6364                                             abi_ulong target_flock_addr)
6365 {
6366     struct target_flock *target_fl;
6367     int l_type;
6368 
6369     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6370         return -TARGET_EFAULT;
6371     }
6372 
6373     __get_user(l_type, &target_fl->l_type);
6374     l_type = target_to_host_flock(l_type);
6375     if (l_type < 0) {
6376         return l_type;
6377     }
6378     fl->l_type = l_type;
6379     __get_user(fl->l_whence, &target_fl->l_whence);
6380     __get_user(fl->l_start, &target_fl->l_start);
6381     __get_user(fl->l_len, &target_fl->l_len);
6382     __get_user(fl->l_pid, &target_fl->l_pid);
6383     unlock_user_struct(target_fl, target_flock_addr, 0);
6384     return 0;
6385 }
6386 
6387 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6388                                           const struct flock64 *fl)
6389 {
6390     struct target_flock *target_fl;
6391     short l_type;
6392 
6393     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6394         return -TARGET_EFAULT;
6395     }
6396 
6397     l_type = host_to_target_flock(fl->l_type);
6398     __put_user(l_type, &target_fl->l_type);
6399     __put_user(fl->l_whence, &target_fl->l_whence);
6400     __put_user(fl->l_start, &target_fl->l_start);
6401     __put_user(fl->l_len, &target_fl->l_len);
6402     __put_user(fl->l_pid, &target_fl->l_pid);
6403     unlock_user_struct(target_fl, target_flock_addr, 1);
6404     return 0;
6405 }
6406 
6407 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6408 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6409 
6410 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6411 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6412                                                    abi_ulong target_flock_addr)
6413 {
6414     struct target_oabi_flock64 *target_fl;
6415     int l_type;
6416 
6417     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6418         return -TARGET_EFAULT;
6419     }
6420 
6421     __get_user(l_type, &target_fl->l_type);
6422     l_type = target_to_host_flock(l_type);
6423     if (l_type < 0) {
6424         return l_type;
6425     }
6426     fl->l_type = l_type;
6427     __get_user(fl->l_whence, &target_fl->l_whence);
6428     __get_user(fl->l_start, &target_fl->l_start);
6429     __get_user(fl->l_len, &target_fl->l_len);
6430     __get_user(fl->l_pid, &target_fl->l_pid);
6431     unlock_user_struct(target_fl, target_flock_addr, 0);
6432     return 0;
6433 }
6434 
6435 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6436                                                  const struct flock64 *fl)
6437 {
6438     struct target_oabi_flock64 *target_fl;
6439     short l_type;
6440 
6441     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6442         return -TARGET_EFAULT;
6443     }
6444 
6445     l_type = host_to_target_flock(fl->l_type);
6446     __put_user(l_type, &target_fl->l_type);
6447     __put_user(fl->l_whence, &target_fl->l_whence);
6448     __put_user(fl->l_start, &target_fl->l_start);
6449     __put_user(fl->l_len, &target_fl->l_len);
6450     __put_user(fl->l_pid, &target_fl->l_pid);
6451     unlock_user_struct(target_fl, target_flock_addr, 1);
6452     return 0;
6453 }
6454 #endif
6455 
6456 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6457                                               abi_ulong target_flock_addr)
6458 {
6459     struct target_flock64 *target_fl;
6460     int l_type;
6461 
6462     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6463         return -TARGET_EFAULT;
6464     }
6465 
6466     __get_user(l_type, &target_fl->l_type);
6467     l_type = target_to_host_flock(l_type);
6468     if (l_type < 0) {
6469         return l_type;
6470     }
6471     fl->l_type = l_type;
6472     __get_user(fl->l_whence, &target_fl->l_whence);
6473     __get_user(fl->l_start, &target_fl->l_start);
6474     __get_user(fl->l_len, &target_fl->l_len);
6475     __get_user(fl->l_pid, &target_fl->l_pid);
6476     unlock_user_struct(target_fl, target_flock_addr, 0);
6477     return 0;
6478 }
6479 
6480 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6481                                             const struct flock64 *fl)
6482 {
6483     struct target_flock64 *target_fl;
6484     short l_type;
6485 
6486     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6487         return -TARGET_EFAULT;
6488     }
6489 
6490     l_type = host_to_target_flock(fl->l_type);
6491     __put_user(l_type, &target_fl->l_type);
6492     __put_user(fl->l_whence, &target_fl->l_whence);
6493     __put_user(fl->l_start, &target_fl->l_start);
6494     __put_user(fl->l_len, &target_fl->l_len);
6495     __put_user(fl->l_pid, &target_fl->l_pid);
6496     unlock_user_struct(target_fl, target_flock_addr, 1);
6497     return 0;
6498 }
6499 
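/*
 * Emulate fcntl(): translate the command with target_to_host_fcntl_cmd()
 * and convert the lock and owner structures between guest and host
 * layouts where the command requires it.
 */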
6500 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6501 {
6502     struct flock64 fl64;
6503 #ifdef F_GETOWN_EX
6504     struct f_owner_ex fox;
6505     struct target_f_owner_ex *target_fox;
6506 #endif
6507     abi_long ret;
6508     int host_cmd = target_to_host_fcntl_cmd(cmd);
6509 
6510     if (host_cmd == -TARGET_EINVAL)
6511         return host_cmd;
6512 
6513     switch(cmd) {
6514     case TARGET_F_GETLK:
6515         ret = copy_from_user_flock(&fl64, arg);
6516         if (ret) {
6517             return ret;
6518         }
6519         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6520         if (ret == 0) {
6521             ret = copy_to_user_flock(arg, &fl64);
6522         }
6523         break;
6524 
6525     case TARGET_F_SETLK:
6526     case TARGET_F_SETLKW:
6527         ret = copy_from_user_flock(&fl64, arg);
6528         if (ret) {
6529             return ret;
6530         }
6531         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6532         break;
6533 
6534     case TARGET_F_GETLK64:
6535     case TARGET_F_OFD_GETLK:
6536         ret = copy_from_user_flock64(&fl64, arg);
6537         if (ret) {
6538             return ret;
6539         }
6540         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6541         if (ret == 0) {
6542             ret = copy_to_user_flock64(arg, &fl64);
6543         }
6544         break;
6545     case TARGET_F_SETLK64:
6546     case TARGET_F_SETLKW64:
6547     case TARGET_F_OFD_SETLK:
6548     case TARGET_F_OFD_SETLKW:
6549         ret = copy_from_user_flock64(&fl64, arg);
6550         if (ret) {
6551             return ret;
6552         }
6553         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6554         break;
6555 
6556     case TARGET_F_GETFL:
6557         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6558         if (ret >= 0) {
6559             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6560         }
6561         break;
6562 
6563     case TARGET_F_SETFL:
6564         ret = get_errno(safe_fcntl(fd, host_cmd,
6565                                    target_to_host_bitmask(arg,
6566                                                           fcntl_flags_tbl)));
6567         break;
6568 
6569 #ifdef F_GETOWN_EX
6570     case TARGET_F_GETOWN_EX:
6571         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6572         if (ret >= 0) {
6573             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6574                 return -TARGET_EFAULT;
6575             target_fox->type = tswap32(fox.type);
6576             target_fox->pid = tswap32(fox.pid);
6577             unlock_user_struct(target_fox, arg, 1);
6578         }
6579         break;
6580 #endif
6581 
6582 #ifdef F_SETOWN_EX
6583     case TARGET_F_SETOWN_EX:
6584         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6585             return -TARGET_EFAULT;
6586         fox.type = tswap32(target_fox->type);
6587         fox.pid = tswap32(target_fox->pid);
6588         unlock_user_struct(target_fox, arg, 0);
6589         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6590         break;
6591 #endif
6592 
6593     case TARGET_F_SETOWN:
6594     case TARGET_F_GETOWN:
6595     case TARGET_F_SETSIG:
6596     case TARGET_F_GETSIG:
6597     case TARGET_F_SETLEASE:
6598     case TARGET_F_GETLEASE:
6599     case TARGET_F_SETPIPE_SZ:
6600     case TARGET_F_GETPIPE_SZ:
6601         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6602         break;
6603 
6604     default:
6605         ret = get_errno(safe_fcntl(fd, cmd, arg));
6606         break;
6607     }
6608     return ret;
6609 }
6610 
6611 #ifdef USE_UID16
6612 
6613 static inline int high2lowuid(int uid)
6614 {
6615     if (uid > 65535)
6616         return 65534;
6617     else
6618         return uid;
6619 }
6620 
6621 static inline int high2lowgid(int gid)
6622 {
6623     if (gid > 65535)
6624         return 65534;
6625     else
6626         return gid;
6627 }
6628 
6629 static inline int low2highuid(int uid)
6630 {
6631     if ((int16_t)uid == -1)
6632         return -1;
6633     else
6634         return uid;
6635 }
6636 
6637 static inline int low2highgid(int gid)
6638 {
6639     if ((int16_t)gid == -1)
6640         return -1;
6641     else
6642         return gid;
6643 }
6644 static inline int tswapid(int id)
6645 {
6646     return tswap16(id);
6647 }
6648 
6649 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6650 
6651 #else /* !USE_UID16 */
6652 static inline int high2lowuid(int uid)
6653 {
6654     return uid;
6655 }
6656 static inline int high2lowgid(int gid)
6657 {
6658     return gid;
6659 }
6660 static inline int low2highuid(int uid)
6661 {
6662     return uid;
6663 }
6664 static inline int low2highgid(int gid)
6665 {
6666     return gid;
6667 }
6668 static inline int tswapid(int id)
6669 {
6670     return tswap32(id);
6671 }
6672 
6673 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6674 
6675 #endif /* USE_UID16 */
6676 
6677 /* We must do direct syscalls for setting UID/GID, because we want to
6678  * implement the Linux system call semantics of "change only for this thread",
6679  * not the libc/POSIX semantics of "change for all threads in process".
6680  * (See http://ewontfix.com/17/ for more details.)
6681  * We use the 32-bit version of the syscalls if present; if it is not
6682  * then either the host architecture supports 32-bit UIDs natively with
6683  * the standard syscall, or the 16-bit UID is the best we can do.
6684  */
6685 #ifdef __NR_setuid32
6686 #define __NR_sys_setuid __NR_setuid32
6687 #else
6688 #define __NR_sys_setuid __NR_setuid
6689 #endif
6690 #ifdef __NR_setgid32
6691 #define __NR_sys_setgid __NR_setgid32
6692 #else
6693 #define __NR_sys_setgid __NR_setgid
6694 #endif
6695 #ifdef __NR_setresuid32
6696 #define __NR_sys_setresuid __NR_setresuid32
6697 #else
6698 #define __NR_sys_setresuid __NR_setresuid
6699 #endif
6700 #ifdef __NR_setresgid32
6701 #define __NR_sys_setresgid __NR_setresgid32
6702 #else
6703 #define __NR_sys_setresgid __NR_setresgid
6704 #endif
6705 
6706 _syscall1(int, sys_setuid, uid_t, uid)
6707 _syscall1(int, sys_setgid, gid_t, gid)
6708 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6709 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6710 
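/*
 * One-time initialization: register the structure descriptions used by
 * the thunk layer, build target_to_host_errno_table[] from its inverse,
 * and patch ioctl size fields that could not be computed at compile time.
 */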
6711 void syscall_init(void)
6712 {
6713     IOCTLEntry *ie;
6714     const argtype *arg_type;
6715     int size;
6716     int i;
6717 
6718     thunk_init(STRUCT_MAX);
6719 
6720 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6721 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6722 #include "syscall_types.h"
6723 #undef STRUCT
6724 #undef STRUCT_SPECIAL
6725 
6726     /* Build target_to_host_errno_table[] table from
6727      * host_to_target_errno_table[]. */
6728     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6729         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6730     }
6731 
6732     /* We patch the ioctl size if necessary. We rely on the fact that
6733        no ioctl has all bits set to '1' in the size field. */
6734     ie = ioctl_entries;
6735     while (ie->target_cmd != 0) {
6736         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6737             TARGET_IOC_SIZEMASK) {
6738             arg_type = ie->arg_type;
6739             if (arg_type[0] != TYPE_PTR) {
6740                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6741                         ie->target_cmd);
6742                 exit(1);
6743             }
6744             arg_type++;
6745             size = thunk_type_size(arg_type, 0);
6746             ie->target_cmd = (ie->target_cmd &
6747                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6748                 (size << TARGET_IOC_SIZESHIFT);
6749         }
6750 
6751         /* automatic consistency check if same arch */
6752 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6753     (defined(__x86_64__) && defined(TARGET_X86_64))
6754         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6755             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6756                     ie->name, ie->target_cmd, ie->host_cmd);
6757         }
6758 #endif
6759         ie++;
6760     }
6761 }
6762 
6763 #ifdef TARGET_NR_truncate64
6764 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6765                                          abi_long arg2,
6766                                          abi_long arg3,
6767                                          abi_long arg4)
6768 {
6769     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6770         arg2 = arg3;
6771         arg3 = arg4;
6772     }
6773     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6774 }
6775 #endif
6776 
6777 #ifdef TARGET_NR_ftruncate64
6778 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6779                                           abi_long arg2,
6780                                           abi_long arg3,
6781                                           abi_long arg4)
6782 {
6783     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6784         arg2 = arg3;
6785         arg3 = arg4;
6786     }
6787     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6788 }
6789 #endif
6790 
6791 #if defined(TARGET_NR_timer_settime) || \
6792     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6793 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6794                                                  abi_ulong target_addr)
6795 {
6796     struct target_itimerspec *target_itspec;
6797 
6798     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6799         return -TARGET_EFAULT;
6800     }
6801 
6802     host_itspec->it_interval.tv_sec =
6803                             tswapal(target_itspec->it_interval.tv_sec);
6804     host_itspec->it_interval.tv_nsec =
6805                             tswapal(target_itspec->it_interval.tv_nsec);
6806     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6807     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6808 
6809     unlock_user_struct(target_itspec, target_addr, 1);
6810     return 0;
6811 }
6812 #endif
6813 
6814 #if ((defined(TARGET_NR_timerfd_gettime) || \
6815       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6816     defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6817 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6818                                                struct itimerspec *host_its)
6819 {
6820     struct target_itimerspec *target_itspec;
6821 
6822     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6823         return -TARGET_EFAULT;
6824     }
6825 
6826     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6827     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6828 
6829     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6830     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6831 
6832     unlock_user_struct(target_itspec, target_addr, 0);
6833     return 0;
6834 }
6835 #endif
6836 
6837 #if defined(TARGET_NR_adjtimex) || \
6838     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6839 static inline abi_long target_to_host_timex(struct timex *host_tx,
6840                                             abi_long target_addr)
6841 {
6842     struct target_timex *target_tx;
6843 
6844     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6845         return -TARGET_EFAULT;
6846     }
6847 
6848     __get_user(host_tx->modes, &target_tx->modes);
6849     __get_user(host_tx->offset, &target_tx->offset);
6850     __get_user(host_tx->freq, &target_tx->freq);
6851     __get_user(host_tx->maxerror, &target_tx->maxerror);
6852     __get_user(host_tx->esterror, &target_tx->esterror);
6853     __get_user(host_tx->status, &target_tx->status);
6854     __get_user(host_tx->constant, &target_tx->constant);
6855     __get_user(host_tx->precision, &target_tx->precision);
6856     __get_user(host_tx->tolerance, &target_tx->tolerance);
6857     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6858     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6859     __get_user(host_tx->tick, &target_tx->tick);
6860     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6861     __get_user(host_tx->jitter, &target_tx->jitter);
6862     __get_user(host_tx->shift, &target_tx->shift);
6863     __get_user(host_tx->stabil, &target_tx->stabil);
6864     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6865     __get_user(host_tx->calcnt, &target_tx->calcnt);
6866     __get_user(host_tx->errcnt, &target_tx->errcnt);
6867     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6868     __get_user(host_tx->tai, &target_tx->tai);
6869 
6870     unlock_user_struct(target_tx, target_addr, 0);
6871     return 0;
6872 }
6873 
6874 static inline abi_long host_to_target_timex(abi_long target_addr,
6875                                             struct timex *host_tx)
6876 {
6877     struct target_timex *target_tx;
6878 
6879     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6880         return -TARGET_EFAULT;
6881     }
6882 
6883     __put_user(host_tx->modes, &target_tx->modes);
6884     __put_user(host_tx->offset, &target_tx->offset);
6885     __put_user(host_tx->freq, &target_tx->freq);
6886     __put_user(host_tx->maxerror, &target_tx->maxerror);
6887     __put_user(host_tx->esterror, &target_tx->esterror);
6888     __put_user(host_tx->status, &target_tx->status);
6889     __put_user(host_tx->constant, &target_tx->constant);
6890     __put_user(host_tx->precision, &target_tx->precision);
6891     __put_user(host_tx->tolerance, &target_tx->tolerance);
6892     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6893     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6894     __put_user(host_tx->tick, &target_tx->tick);
6895     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6896     __put_user(host_tx->jitter, &target_tx->jitter);
6897     __put_user(host_tx->shift, &target_tx->shift);
6898     __put_user(host_tx->stabil, &target_tx->stabil);
6899     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6900     __put_user(host_tx->calcnt, &target_tx->calcnt);
6901     __put_user(host_tx->errcnt, &target_tx->errcnt);
6902     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6903     __put_user(host_tx->tai, &target_tx->tai);
6904 
6905     unlock_user_struct(target_tx, target_addr, 1);
6906     return 0;
6907 }
6908 #endif
6909 
6910 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6911                                                abi_ulong target_addr)
6912 {
6913     struct target_sigevent *target_sevp;
6914 
6915     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6916         return -TARGET_EFAULT;
6917     }
6918 
6919     /* This union is awkward on 64 bit systems because it has a 32 bit
6920      * integer and a pointer in it; we follow the conversion approach
6921      * used for handling sigval types in signal.c so the guest should get
6922      * the correct value back even if we did a 64 bit byteswap and it's
6923      * using the 32 bit integer.
6924      */
6925     host_sevp->sigev_value.sival_ptr =
6926         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6927     host_sevp->sigev_signo =
6928         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6929     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6930     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6931 
6932     unlock_user_struct(target_sevp, target_addr, 1);
6933     return 0;
6934 }
6935 
6936 #if defined(TARGET_NR_mlockall)
6937 static inline int target_to_host_mlockall_arg(int arg)
6938 {
6939     int result = 0;
6940 
6941     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6942         result |= MCL_CURRENT;
6943     }
6944     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6945         result |= MCL_FUTURE;
6946     }
6947     return result;
6948 }
6949 #endif
6950 
6951 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6952      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6953      defined(TARGET_NR_newfstatat))
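/*
 * Convert a host struct stat into the guest's 64-bit stat layout and
 * write it to guest memory; 32-bit ARM EABI uses its own variant.
 */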
6954 static inline abi_long host_to_target_stat64(void *cpu_env,
6955                                              abi_ulong target_addr,
6956                                              struct stat *host_st)
6957 {
6958 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6959     if (((CPUARMState *)cpu_env)->eabi) {
6960         struct target_eabi_stat64 *target_st;
6961 
6962         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6963             return -TARGET_EFAULT;
6964         memset(target_st, 0, sizeof(struct target_eabi_stat64));
6965         __put_user(host_st->st_dev, &target_st->st_dev);
6966         __put_user(host_st->st_ino, &target_st->st_ino);
6967 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6968         __put_user(host_st->st_ino, &target_st->__st_ino);
6969 #endif
6970         __put_user(host_st->st_mode, &target_st->st_mode);
6971         __put_user(host_st->st_nlink, &target_st->st_nlink);
6972         __put_user(host_st->st_uid, &target_st->st_uid);
6973         __put_user(host_st->st_gid, &target_st->st_gid);
6974         __put_user(host_st->st_rdev, &target_st->st_rdev);
6975         __put_user(host_st->st_size, &target_st->st_size);
6976         __put_user(host_st->st_blksize, &target_st->st_blksize);
6977         __put_user(host_st->st_blocks, &target_st->st_blocks);
6978         __put_user(host_st->st_atime, &target_st->target_st_atime);
6979         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6980         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6981 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6982         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6983         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6984         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6985 #endif
6986         unlock_user_struct(target_st, target_addr, 1);
6987     } else
6988 #endif
6989     {
6990 #if defined(TARGET_HAS_STRUCT_STAT64)
6991         struct target_stat64 *target_st;
6992 #else
6993         struct target_stat *target_st;
6994 #endif
6995 
6996         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6997             return -TARGET_EFAULT;
6998         memset(target_st, 0, sizeof(*target_st));
6999         __put_user(host_st->st_dev, &target_st->st_dev);
7000         __put_user(host_st->st_ino, &target_st->st_ino);
7001 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7002         __put_user(host_st->st_ino, &target_st->__st_ino);
7003 #endif
7004         __put_user(host_st->st_mode, &target_st->st_mode);
7005         __put_user(host_st->st_nlink, &target_st->st_nlink);
7006         __put_user(host_st->st_uid, &target_st->st_uid);
7007         __put_user(host_st->st_gid, &target_st->st_gid);
7008         __put_user(host_st->st_rdev, &target_st->st_rdev);
7009         /* XXX: better use of kernel struct */
7010         __put_user(host_st->st_size, &target_st->st_size);
7011         __put_user(host_st->st_blksize, &target_st->st_blksize);
7012         __put_user(host_st->st_blocks, &target_st->st_blocks);
7013         __put_user(host_st->st_atime, &target_st->target_st_atime);
7014         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7015         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7016 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7017         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7018         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7019         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7020 #endif
7021         unlock_user_struct(target_st, target_addr, 1);
7022     }
7023 
7024     return 0;
7025 }
7026 #endif
7027 
7028 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7029 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7030                                             abi_ulong target_addr)
7031 {
7032     struct target_statx *target_stx;
7033 
7034     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7035         return -TARGET_EFAULT;
7036     }
7037     memset(target_stx, 0, sizeof(*target_stx));
7038 
7039     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7040     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7041     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7042     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7043     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7044     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7045     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7046     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7047     __put_user(host_stx->stx_size, &target_stx->stx_size);
7048     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7049     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7050     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7051     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7052     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7053     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7054     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7055     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7056     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7057     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7058     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7059     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7060     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7061     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7062 
7063     unlock_user_struct(target_stx, target_addr, 1);
7064 
7065     return 0;
7066 }
7067 #endif
7068 
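/*
 * Issue the raw futex syscall, selecting __NR_futex or __NR_futex_time64
 * according to the width of the host timespec; 64-bit hosts always have
 * a 64-bit time_t and only provide __NR_futex.
 */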
7069 static int do_sys_futex(int *uaddr, int op, int val,
7070                          const struct timespec *timeout, int *uaddr2,
7071                          int val3)
7072 {
7073 #if HOST_LONG_BITS == 64
7074 #if defined(__NR_futex)
7075     /* The host time_t is always 64-bit, so no _time64 variant is defined. */
7076     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7077 
7078 #endif
7079 #else /* HOST_LONG_BITS == 64 */
7080 #if defined(__NR_futex_time64)
7081     if (sizeof(timeout->tv_sec) == 8) {
7082         /* _time64 function on 32bit arch */
7083         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7084     }
7085 #endif
7086 #if defined(__NR_futex)
7087     /* old function on 32bit arch */
7088     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7089 #endif
7090 #endif /* HOST_LONG_BITS == 64 */
7091     g_assert_not_reached();
7092 }
7093 
7094 static int do_safe_futex(int *uaddr, int op, int val,
7095                          const struct timespec *timeout, int *uaddr2,
7096                          int val3)
7097 {
7098 #if HOST_LONG_BITS == 64
7099 #if defined(__NR_futex)
7100     /* always a 64-bit time_t, it doesn't define _time64 version  */
7101     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7102 #endif
7103 #else /* HOST_LONG_BITS == 64 */
7104 #if defined(__NR_futex_time64)
7105     if (sizeof(timeout->tv_sec) == 8) {
7106         /* _time64 function on 32bit arch */
7107         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7108                                            val3));
7109     }
7110 #endif
7111 #if defined(__NR_futex)
7112     /* old function on 32bit arch */
7113     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7114 #endif
7115 #endif /* HOST_LONG_BITS == 64 */
7116     return -TARGET_ENOSYS;
7117 }
7118 
7119 /* ??? Using host futex calls even when target atomic operations
7120    are not really atomic probably breaks things.  However, implementing
7121    futexes locally would make futexes shared between multiple processes
7122    tricky.  Then again, they're probably useless in that case because
7123    guest atomic operations won't work either.  */
7124 #if defined(TARGET_NR_futex)
7125 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7126                     target_ulong uaddr2, int val3)
7127 {
7128     struct timespec ts, *pts;
7129     int base_op;
7130 
7131     /* ??? We assume FUTEX_* constants are the same on both host
7132        and target.  */
7133 #ifdef FUTEX_CMD_MASK
7134     base_op = op & FUTEX_CMD_MASK;
7135 #else
7136     base_op = op;
7137 #endif
7138     switch (base_op) {
7139     case FUTEX_WAIT:
7140     case FUTEX_WAIT_BITSET:
7141         if (timeout) {
7142             pts = &ts;
7143             target_to_host_timespec(pts, timeout);
7144         } else {
7145             pts = NULL;
7146         }
7147         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7148     case FUTEX_WAKE:
7149         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7150     case FUTEX_FD:
7151         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7152     case FUTEX_REQUEUE:
7153     case FUTEX_CMP_REQUEUE:
7154     case FUTEX_WAKE_OP:
7155         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7156            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7157            But the prototype takes a `struct timespec *'; insert casts
7158            to satisfy the compiler.  We do not need to tswap TIMEOUT
7159            since it's not compared to guest memory.  */
7160         pts = (struct timespec *)(uintptr_t) timeout;
7161         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7162                              (base_op == FUTEX_CMP_REQUEUE
7163                                       ? tswap32(val3)
7164                                       : val3));
7165     default:
7166         return -TARGET_ENOSYS;
7167     }
7168 }
7169 #endif
7170 
7171 #if defined(TARGET_NR_futex_time64)
7172 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7173                            target_ulong uaddr2, int val3)
7174 {
7175     struct timespec ts, *pts;
7176     int base_op;
7177 
7178     /* ??? We assume FUTEX_* constants are the same on both host
7179        and target.  */
7180 #ifdef FUTEX_CMD_MASK
7181     base_op = op & FUTEX_CMD_MASK;
7182 #else
7183     base_op = op;
7184 #endif
7185     switch (base_op) {
7186     case FUTEX_WAIT:
7187     case FUTEX_WAIT_BITSET:
7188         if (timeout) {
7189             pts = &ts;
7190             target_to_host_timespec64(pts, timeout);
7191         } else {
7192             pts = NULL;
7193         }
7194         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7195     case FUTEX_WAKE:
7196         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7197     case FUTEX_FD:
7198         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7199     case FUTEX_REQUEUE:
7200     case FUTEX_CMP_REQUEUE:
7201     case FUTEX_WAKE_OP:
7202         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7203            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7204            But the prototype takes a `struct timespec *'; insert casts
7205            to satisfy the compiler.  We do not need to tswap TIMEOUT
7206            since it's not compared to guest memory.  */
7207         pts = (struct timespec *)(uintptr_t) timeout;
7208         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7209                              (base_op == FUTEX_CMP_REQUEUE
7210                                       ? tswap32(val3)
7211                                       : val3));
7212     default:
7213         return -TARGET_ENOSYS;
7214     }
7215 }
7216 #endif
7217 
7218 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
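/*
 * Emulate name_to_handle_at(): look up the guest path, let the host
 * fill in the opaque file handle, then copy it back to guest memory
 * with handle_bytes/handle_type byte-swapped, and store the mount id.
 */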
7219 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7220                                      abi_long handle, abi_long mount_id,
7221                                      abi_long flags)
7222 {
7223     struct file_handle *target_fh;
7224     struct file_handle *fh;
7225     int mid = 0;
7226     abi_long ret;
7227     char *name;
7228     unsigned int size, total_size;
7229 
7230     if (get_user_s32(size, handle)) {
7231         return -TARGET_EFAULT;
7232     }
7233 
7234     name = lock_user_string(pathname);
7235     if (!name) {
7236         return -TARGET_EFAULT;
7237     }
7238 
7239     total_size = sizeof(struct file_handle) + size;
7240     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7241     if (!target_fh) {
7242         unlock_user(name, pathname, 0);
7243         return -TARGET_EFAULT;
7244     }
7245 
7246     fh = g_malloc0(total_size);
7247     fh->handle_bytes = size;
7248 
7249     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7250     unlock_user(name, pathname, 0);
7251 
7252     /* man name_to_handle_at(2):
7253      * Other than the use of the handle_bytes field, the caller should treat
7254      * the file_handle structure as an opaque data type
7255      */
7256 
7257     memcpy(target_fh, fh, total_size);
7258     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7259     target_fh->handle_type = tswap32(fh->handle_type);
7260     g_free(fh);
7261     unlock_user(target_fh, handle, total_size);
7262 
7263     if (put_user_s32(mid, mount_id)) {
7264         return -TARGET_EFAULT;
7265     }
7266 
7267     return ret;
7268 
7269 }
7270 #endif
7271 
7272 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7273 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7274                                      abi_long flags)
7275 {
7276     struct file_handle *target_fh;
7277     struct file_handle *fh;
7278     unsigned int size, total_size;
7279     abi_long ret;
7280 
7281     if (get_user_s32(size, handle)) {
7282         return -TARGET_EFAULT;
7283     }
7284 
7285     total_size = sizeof(struct file_handle) + size;
7286     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7287     if (!target_fh) {
7288         return -TARGET_EFAULT;
7289     }
7290 
7291     fh = g_memdup(target_fh, total_size);
7292     fh->handle_bytes = size;
7293     fh->handle_type = tswap32(target_fh->handle_type);
7294 
7295     ret = get_errno(open_by_handle_at(mount_fd, fh,
7296                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7297 
7298     g_free(fh);
7299 
7300     unlock_user(target_fh, handle, total_size);
7301 
7302     return ret;
7303 }
7304 #endif
7305 
7306 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7307 
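/*
 * Emulate signalfd4(): convert the guest signal mask and flags to host
 * form and register a translation handler for the returned descriptor
 * so that data read from it is converted back to the guest layout.
 */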
7308 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7309 {
7310     int host_flags;
7311     target_sigset_t *target_mask;
7312     sigset_t host_mask;
7313     abi_long ret;
7314 
7315     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7316         return -TARGET_EINVAL;
7317     }
7318     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7319         return -TARGET_EFAULT;
7320     }
7321 
7322     target_to_host_sigset(&host_mask, target_mask);
7323 
7324     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7325 
7326     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7327     if (ret >= 0) {
7328         fd_trans_register(ret, &target_signalfd_trans);
7329     }
7330 
7331     unlock_user_struct(target_mask, mask, 0);
7332 
7333     return ret;
7334 }
7335 #endif
7336 
7337 /* Map host to target signal numbers for the wait family of syscalls.
7338    Assume all other status bits are the same.  */
7339 int host_to_target_waitstatus(int status)
7340 {
7341     if (WIFSIGNALED(status)) {
7342         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7343     }
7344     if (WIFSTOPPED(status)) {
7345         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7346                | (status & 0xff);
7347     }
7348     return status;
7349 }
7350 
7351 static int open_self_cmdline(void *cpu_env, int fd)
7352 {
7353     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7354     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7355     int i;
7356 
7357     for (i = 0; i < bprm->argc; i++) {
7358         size_t len = strlen(bprm->argv[i]) + 1;
7359 
7360         if (write(fd, bprm->argv[i], len) != len) {
7361             return -1;
7362         }
7363     }
7364 
7365     return 0;
7366 }
7367 
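/*
 * Synthesize /proc/self/maps for the guest: walk the host mappings and
 * emit only the ranges that correspond to valid guest addresses,
 * translated into guest addresses, plus a [vsyscall] entry on targets
 * that provide one.
 */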
7368 static int open_self_maps(void *cpu_env, int fd)
7369 {
7370     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7371     TaskState *ts = cpu->opaque;
7372     GSList *map_info = read_self_maps();
7373     GSList *s;
7374     int count;
7375 
7376     for (s = map_info; s; s = g_slist_next(s)) {
7377         MapInfo *e = (MapInfo *) s->data;
7378 
7379         if (h2g_valid(e->start)) {
7380             unsigned long min = e->start;
7381             unsigned long max = e->end;
7382             int flags = page_get_flags(h2g(min));
7383             const char *path;
7384 
7385             max = h2g_valid(max - 1) ?
7386                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7387 
7388             if (page_check_range(h2g(min), max - min, flags) == -1) {
7389                 continue;
7390             }
7391 
7392             if (h2g(min) == ts->info->stack_limit) {
7393                 path = "[stack]";
7394             } else {
7395                 path = e->path;
7396             }
7397 
7398             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7399                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7400                             h2g(min), h2g(max - 1) + 1,
7401                             e->is_read ? 'r' : '-',
7402                             e->is_write ? 'w' : '-',
7403                             e->is_exec ? 'x' : '-',
7404                             e->is_priv ? 'p' : '-',
7405                             (uint64_t) e->offset, e->dev, e->inode);
7406             if (path) {
7407                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7408             } else {
7409                 dprintf(fd, "\n");
7410             }
7411         }
7412     }
7413 
7414     free_self_maps(map_info);
7415 
7416 #ifdef TARGET_VSYSCALL_PAGE
7417     /*
7418      * We only support execution from the vsyscall page.
7419      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7420      */
7421     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7422                     " --xp 00000000 00:00 0",
7423                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7424     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7425 #endif
7426 
7427     return 0;
7428 }
7429 
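/*
 * Synthesize /proc/self/stat for the guest: only the pid, command name
 * and start-of-stack fields are filled in; all other fields read as zero.
 */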
7430 static int open_self_stat(void *cpu_env, int fd)
7431 {
7432     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7433     TaskState *ts = cpu->opaque;
7434     g_autoptr(GString) buf = g_string_new(NULL);
7435     int i;
7436 
7437     for (i = 0; i < 44; i++) {
7438         if (i == 0) {
7439             /* pid */
7440             g_string_printf(buf, FMT_pid " ", getpid());
7441         } else if (i == 1) {
7442             /* app name */
7443             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7444             bin = bin ? bin + 1 : ts->bprm->argv[0];
7445             g_string_printf(buf, "(%.15s) ", bin);
7446         } else if (i == 27) {
7447             /* stack bottom */
7448             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7449         } else {
7450             /* the remaining fields are not emulated and read as zero */
7451             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7452         }
7453 
7454         if (write(fd, buf->str, buf->len) != buf->len) {
7455             return -1;
7456         }
7457     }
7458 
7459     return 0;
7460 }
7461 
7462 static int open_self_auxv(void *cpu_env, int fd)
7463 {
7464     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7465     TaskState *ts = cpu->opaque;
7466     abi_ulong auxv = ts->info->saved_auxv;
7467     abi_ulong len = ts->info->auxv_len;
7468     char *ptr;
7469 
7470     /*
7471      * The auxiliary vector is stored on the target process stack.
7472      * Read the whole auxv vector and copy it to the file.
7473      */
7474     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7475     if (ptr != NULL) {
7476         while (len > 0) {
7477             ssize_t r;
7478             r = write(fd, ptr, len);
7479             if (r <= 0) {
7480                 break;
7481             }
7482             len -= r;
7483             ptr += r;
7484         }
7485         lseek(fd, 0, SEEK_SET);
7486         unlock_user(ptr, auxv, len);
7487     }
7488 
7489     return 0;
7490 }
7491 
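/*
 * Return nonzero if filename refers to the given entry under
 * /proc/self/ or /proc/<pid>/ for our own pid.
 */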
7492 static int is_proc_myself(const char *filename, const char *entry)
7493 {
7494     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7495         filename += strlen("/proc/");
7496         if (!strncmp(filename, "self/", strlen("self/"))) {
7497             filename += strlen("self/");
7498         } else if (*filename >= '1' && *filename <= '9') {
7499             char myself[80];
7500             snprintf(myself, sizeof(myself), "%d/", getpid());
7501             if (!strncmp(filename, myself, strlen(myself))) {
7502                 filename += strlen(myself);
7503             } else {
7504                 return 0;
7505             }
7506         } else {
7507             return 0;
7508         }
7509         if (!strcmp(filename, entry)) {
7510             return 1;
7511         }
7512     }
7513     return 0;
7514 }
7515 
7516 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7517     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7518 static int is_proc(const char *filename, const char *entry)
7519 {
7520     return strcmp(filename, entry) == 0;
7521 }
7522 #endif
7523 
7524 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7525 static int open_net_route(void *cpu_env, int fd)
7526 {
7527     FILE *fp;
7528     char *line = NULL;
7529     size_t len = 0;
7530     ssize_t read;
7531 
7532     fp = fopen("/proc/net/route", "r");
7533     if (fp == NULL) {
7534         return -1;
7535     }
7536 
7537     /* read header */
7538 
7539     read = getline(&line, &len, fp);
7540     dprintf(fd, "%s", line);
7541 
7542     /* read routes */
7543 
7544     while ((read = getline(&line, &len, fp)) != -1) {
7545         char iface[16];
7546         uint32_t dest, gw, mask;
7547         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7548         int fields;
7549 
7550         fields = sscanf(line,
7551                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7552                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7553                         &mask, &mtu, &window, &irtt);
7554         if (fields != 11) {
7555             continue;
7556         }
7557         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7558                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7559                 metric, tswap32(mask), mtu, window, irtt);
7560     }
7561 
7562     free(line);
7563     fclose(fp);
7564 
7565     return 0;
7566 }
7567 #endif
7568 
7569 #if defined(TARGET_SPARC)
7570 static int open_cpuinfo(void *cpu_env, int fd)
7571 {
7572     dprintf(fd, "type\t\t: sun4u\n");
7573     return 0;
7574 }
7575 #endif
7576 
7577 #if defined(TARGET_HPPA)
7578 static int open_cpuinfo(void *cpu_env, int fd)
7579 {
7580     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7581     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7582     dprintf(fd, "capabilities\t: os32\n");
7583     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7584     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7585     return 0;
7586 }
7587 #endif
7588 
7589 #if defined(TARGET_M68K)
7590 static int open_hardware(void *cpu_env, int fd)
7591 {
7592     dprintf(fd, "Model:\t\tqemu-m68k\n");
7593     return 0;
7594 }
7595 #endif
7596 
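/*
 * openat() emulation.  Opens of selected /proc files are intercepted and
 * their contents synthesized into an unlinked temporary file, since the
 * host's versions would describe QEMU itself rather than the guest;
 * everything else is passed through to the host.
 */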
7597 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7598 {
7599     struct fake_open {
7600         const char *filename;
7601         int (*fill)(void *cpu_env, int fd);
7602         int (*cmp)(const char *s1, const char *s2);
7603     };
7604     const struct fake_open *fake_open;
7605     static const struct fake_open fakes[] = {
7606         { "maps", open_self_maps, is_proc_myself },
7607         { "stat", open_self_stat, is_proc_myself },
7608         { "auxv", open_self_auxv, is_proc_myself },
7609         { "cmdline", open_self_cmdline, is_proc_myself },
7610 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7611         { "/proc/net/route", open_net_route, is_proc },
7612 #endif
7613 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7614         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7615 #endif
7616 #if defined(TARGET_M68K)
7617         { "/proc/hardware", open_hardware, is_proc },
7618 #endif
7619         { NULL, NULL, NULL }
7620     };
7621 
7622     if (is_proc_myself(pathname, "exe")) {
7623         int execfd = qemu_getauxval(AT_EXECFD);
7624         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7625     }
7626 
7627     for (fake_open = fakes; fake_open->filename; fake_open++) {
7628         if (fake_open->cmp(pathname, fake_open->filename)) {
7629             break;
7630         }
7631     }
7632 
7633     if (fake_open->filename) {
7634         const char *tmpdir;
7635         char filename[PATH_MAX];
7636         int fd, r;
7637 
7638         /* create a temporary file to hold the synthesized contents */
7639         tmpdir = getenv("TMPDIR");
7640         if (!tmpdir)
7641             tmpdir = "/tmp";
7642         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7643         fd = mkstemp(filename);
7644         if (fd < 0) {
7645             return fd;
7646         }
7647         unlink(filename);
7648 
7649         if ((r = fake_open->fill(cpu_env, fd))) {
7650             int e = errno;
7651             close(fd);
7652             errno = e;
7653             return r;
7654         }
7655         lseek(fd, 0, SEEK_SET);
7656 
7657         return fd;
7658     }
7659 
7660     return safe_openat(dirfd, path(pathname), flags, mode);
7661 }
7662 
7663 #define TIMER_MAGIC 0x0caf0000
7664 #define TIMER_MAGIC_MASK 0xffff0000
7665 
7666 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7667 static target_timer_t get_timer_id(abi_long arg)
7668 {
7669     target_timer_t timerid = arg;
7670 
7671     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7672         return -TARGET_EINVAL;
7673     }
7674 
7675     timerid &= 0xffff;
7676 
7677     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7678         return -TARGET_EINVAL;
7679     }
7680 
7681     return timerid;
7682 }
7683 
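/*
 * Copy a guest CPU mask (an array of abi_ulong) into a host bitmap of
 * unsigned long, remapping bit positions between the two word sizes.
 */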
7684 static int target_to_host_cpu_mask(unsigned long *host_mask,
7685                                    size_t host_size,
7686                                    abi_ulong target_addr,
7687                                    size_t target_size)
7688 {
7689     unsigned target_bits = sizeof(abi_ulong) * 8;
7690     unsigned host_bits = sizeof(*host_mask) * 8;
7691     abi_ulong *target_mask;
7692     unsigned i, j;
7693 
7694     assert(host_size >= target_size);
7695 
7696     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7697     if (!target_mask) {
7698         return -TARGET_EFAULT;
7699     }
7700     memset(host_mask, 0, host_size);
7701 
7702     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7703         unsigned bit = i * target_bits;
7704         abi_ulong val;
7705 
7706         __get_user(val, &target_mask[i]);
7707         for (j = 0; j < target_bits; j++, bit++) {
7708             if (val & (1UL << j)) {
7709                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7710             }
7711         }
7712     }
7713 
7714     unlock_user(target_mask, target_addr, 0);
7715     return 0;
7716 }
7717 
7718 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7719                                    size_t host_size,
7720                                    abi_ulong target_addr,
7721                                    size_t target_size)
7722 {
7723     unsigned target_bits = sizeof(abi_ulong) * 8;
7724     unsigned host_bits = sizeof(*host_mask) * 8;
7725     abi_ulong *target_mask;
7726     unsigned i, j;
7727 
7728     assert(host_size >= target_size);
7729 
7730     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7731     if (!target_mask) {
7732         return -TARGET_EFAULT;
7733     }
7734 
7735     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7736         unsigned bit = i * target_bits;
7737         abi_ulong val = 0;
7738 
7739         for (j = 0; j < target_bits; j++, bit++) {
7740             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7741                 val |= 1UL << j;
7742             }
7743         }
7744         __put_user(val, &target_mask[i]);
7745     }
7746 
7747     unlock_user(target_mask, target_addr, target_size);
7748     return 0;
7749 }
7750 
7751 /* This is an internal helper for do_syscall so that it is easier
7752  * to have a single return point, which allows actions such as
7753  * logging of syscall results to be performed.
7754  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7755  */
7756 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7757                             abi_long arg2, abi_long arg3, abi_long arg4,
7758                             abi_long arg5, abi_long arg6, abi_long arg7,
7759                             abi_long arg8)
7760 {
7761     CPUState *cpu = env_cpu(cpu_env);
7762     abi_long ret;
7763 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7764     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7765     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7766     || defined(TARGET_NR_statx)
7767     struct stat st;
7768 #endif
7769 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7770     || defined(TARGET_NR_fstatfs)
7771     struct statfs stfs;
7772 #endif
7773     void *p;
7774 
7775     switch(num) {
7776     case TARGET_NR_exit:
7777         /* In old applications this may be used to implement _exit(2).
7778            However in threaded applications it is used for thread termination,
7779            and _exit_group is used for application termination.
7780            Do thread termination if we have more than one thread.  */
7781 
7782         if (block_signals()) {
7783             return -TARGET_ERESTARTSYS;
7784         }
7785 
7786         pthread_mutex_lock(&clone_lock);
7787 
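             /* More than one registered CPU means more than one guest thread. */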
7788         if (CPU_NEXT(first_cpu)) {
7789             TaskState *ts = cpu->opaque;
7790 
7791             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7792             object_unref(OBJECT(cpu));
7793             /*
7794              * At this point the CPU should be unrealized and removed
7795              * from cpu lists. We can clean up the rest of the thread
7796              * data without the lock held.
7797              */
7798 
7799             pthread_mutex_unlock(&clone_lock);
7800 
7801             if (ts->child_tidptr) {
7802                 put_user_u32(0, ts->child_tidptr);
7803                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7804                           NULL, NULL, 0);
7805             }
7806             thread_cpu = NULL;
7807             g_free(ts);
7808             rcu_unregister_thread();
7809             pthread_exit(NULL);
7810         }
7811 
7812         pthread_mutex_unlock(&clone_lock);
7813         preexit_cleanup(cpu_env, arg1);
7814         _exit(arg1);
7815         return 0; /* avoid warning */
7816     case TARGET_NR_read:
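             /*
              * A zero-length read with a NULL buffer is valid: pass it straight
              * to the host so that the usual fd checks (e.g. EBADF) still apply
              * and a successful call returns 0, rather than risking a spurious
              * EFAULT from lock_user() on the guest's NULL pointer.
              */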
7817         if (arg2 == 0 && arg3 == 0) {
7818             return get_errno(safe_read(arg1, 0, 0));
7819         } else {
7820             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7821                 return -TARGET_EFAULT;
7822             ret = get_errno(safe_read(arg1, p, arg3));
7823             if (ret >= 0 &&
7824                 fd_trans_host_to_target_data(arg1)) {
7825                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7826             }
7827             unlock_user(p, arg2, ret);
7828         }
7829         return ret;
7830     case TARGET_NR_write:
7831         if (arg2 == 0 && arg3 == 0) {
7832             return get_errno(safe_write(arg1, 0, 0));
7833         }
7834         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7835             return -TARGET_EFAULT;
7836         if (fd_trans_target_to_host_data(arg1)) {
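                 /*
                  * The fd translator may rewrite the data, so work on a private
                  * copy rather than on the locked guest buffer, which must not
                  * be modified by write().
                  */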
7837             void *copy = g_malloc(arg3);
7838             memcpy(copy, p, arg3);
7839             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7840             if (ret >= 0) {
7841                 ret = get_errno(safe_write(arg1, copy, ret));
7842             }
7843             g_free(copy);
7844         } else {
7845             ret = get_errno(safe_write(arg1, p, arg3));
7846         }
7847         unlock_user(p, arg2, 0);
7848         return ret;
7849 
7850 #ifdef TARGET_NR_open
7851     case TARGET_NR_open:
7852         if (!(p = lock_user_string(arg1)))
7853             return -TARGET_EFAULT;
7854         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7855                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7856                                   arg3));
7857         fd_trans_unregister(ret);
7858         unlock_user(p, arg1, 0);
7859         return ret;
7860 #endif
7861     case TARGET_NR_openat:
7862         if (!(p = lock_user_string(arg2)))
7863             return -TARGET_EFAULT;
7864         ret = get_errno(do_openat(cpu_env, arg1, p,
7865                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7866                                   arg4));
7867         fd_trans_unregister(ret);
7868         unlock_user(p, arg2, 0);
7869         return ret;
7870 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7871     case TARGET_NR_name_to_handle_at:
7872         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7873         return ret;
7874 #endif
7875 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7876     case TARGET_NR_open_by_handle_at:
7877         ret = do_open_by_handle_at(arg1, arg2, arg3);
7878         fd_trans_unregister(ret);
7879         return ret;
7880 #endif
7881     case TARGET_NR_close:
7882         fd_trans_unregister(arg1);
7883         return get_errno(close(arg1));
7884 
7885     case TARGET_NR_brk:
7886         return do_brk(arg1);
7887 #ifdef TARGET_NR_fork
7888     case TARGET_NR_fork:
7889         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7890 #endif
7891 #ifdef TARGET_NR_waitpid
7892     case TARGET_NR_waitpid:
7893         {
7894             int status;
7895             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7896             if (!is_error(ret) && arg2 && ret
7897                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7898                 return -TARGET_EFAULT;
7899         }
7900         return ret;
7901 #endif
7902 #ifdef TARGET_NR_waitid
7903     case TARGET_NR_waitid:
7904         {
7905             siginfo_t info;
7906             info.si_pid = 0;
7907             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7908             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7909                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7910                     return -TARGET_EFAULT;
7911                 host_to_target_siginfo(p, &info);
7912                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7913             }
7914         }
7915         return ret;
7916 #endif
7917 #ifdef TARGET_NR_creat /* not on alpha */
7918     case TARGET_NR_creat:
7919         if (!(p = lock_user_string(arg1)))
7920             return -TARGET_EFAULT;
7921         ret = get_errno(creat(p, arg2));
7922         fd_trans_unregister(ret);
7923         unlock_user(p, arg1, 0);
7924         return ret;
7925 #endif
7926 #ifdef TARGET_NR_link
7927     case TARGET_NR_link:
7928         {
7929             void * p2;
7930             p = lock_user_string(arg1);
7931             p2 = lock_user_string(arg2);
7932             if (!p || !p2)
7933                 ret = -TARGET_EFAULT;
7934             else
7935                 ret = get_errno(link(p, p2));
7936             unlock_user(p2, arg2, 0);
7937             unlock_user(p, arg1, 0);
7938         }
7939         return ret;
7940 #endif
7941 #if defined(TARGET_NR_linkat)
7942     case TARGET_NR_linkat:
7943         {
7944             void * p2 = NULL;
7945             if (!arg2 || !arg4)
7946                 return -TARGET_EFAULT;
7947             p  = lock_user_string(arg2);
7948             p2 = lock_user_string(arg4);
7949             if (!p || !p2)
7950                 ret = -TARGET_EFAULT;
7951             else
7952                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7953             unlock_user(p, arg2, 0);
7954             unlock_user(p2, arg4, 0);
7955         }
7956         return ret;
7957 #endif
7958 #ifdef TARGET_NR_unlink
7959     case TARGET_NR_unlink:
7960         if (!(p = lock_user_string(arg1)))
7961             return -TARGET_EFAULT;
7962         ret = get_errno(unlink(p));
7963         unlock_user(p, arg1, 0);
7964         return ret;
7965 #endif
7966 #if defined(TARGET_NR_unlinkat)
7967     case TARGET_NR_unlinkat:
7968         if (!(p = lock_user_string(arg2)))
7969             return -TARGET_EFAULT;
7970         ret = get_errno(unlinkat(arg1, p, arg3));
7971         unlock_user(p, arg2, 0);
7972         return ret;
7973 #endif
7974     case TARGET_NR_execve:
7975         {
7976             char **argp, **envp;
7977             int argc, envc;
7978             abi_ulong gp;
7979             abi_ulong guest_argp;
7980             abi_ulong guest_envp;
7981             abi_ulong addr;
7982             char **q;
7983             int total_size = 0;
7984 
7985             argc = 0;
7986             guest_argp = arg2;
7987             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7988                 if (get_user_ual(addr, gp))
7989                     return -TARGET_EFAULT;
7990                 if (!addr)
7991                     break;
7992                 argc++;
7993             }
7994             envc = 0;
7995             guest_envp = arg3;
7996             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7997                 if (get_user_ual(addr, gp))
7998                     return -TARGET_EFAULT;
7999                 if (!addr)
8000                     break;
8001                 envc++;
8002             }
8003 
8004             argp = g_new0(char *, argc + 1);
8005             envp = g_new0(char *, envc + 1);
8006 
8007             for (gp = guest_argp, q = argp; gp;
8008                   gp += sizeof(abi_ulong), q++) {
8009                 if (get_user_ual(addr, gp))
8010                     goto execve_efault;
8011                 if (!addr)
8012                     break;
8013                 if (!(*q = lock_user_string(addr)))
8014                     goto execve_efault;
8015                 total_size += strlen(*q) + 1;
8016             }
8017             *q = NULL;
8018 
8019             for (gp = guest_envp, q = envp; gp;
8020                   gp += sizeof(abi_ulong), q++) {
8021                 if (get_user_ual(addr, gp))
8022                     goto execve_efault;
8023                 if (!addr)
8024                     break;
8025                 if (!(*q = lock_user_string(addr)))
8026                     goto execve_efault;
8027                 total_size += strlen(*q) + 1;
8028             }
8029             *q = NULL;
8030 
8031             if (!(p = lock_user_string(arg1)))
8032                 goto execve_efault;
8033             /* Although execve() is not an interruptible syscall it is
8034              * a special case where we must use the safe_syscall wrapper:
8035              * if we allow a signal to happen before we make the host
8036              * syscall then we will 'lose' it, because at the point of
8037              * execve the process leaves QEMU's control. So we use the
8038              * safe syscall wrapper to ensure that we either take the
8039              * signal as a guest signal, or else it does not happen
8040              * before the execve completes and makes it the other
8041              * program's problem.
8042              */
8043             ret = get_errno(safe_execve(p, argp, envp));
8044             unlock_user(p, arg1, 0);
8045 
8046             goto execve_end;
8047 
8048         execve_efault:
8049             ret = -TARGET_EFAULT;
8050 
8051         execve_end:
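                 /*
                  * Unlock every guest string that was successfully locked above;
                  * both loops stop at the first NULL entry in argp/envp.
                  */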
8052             for (gp = guest_argp, q = argp; *q;
8053                   gp += sizeof(abi_ulong), q++) {
8054                 if (get_user_ual(addr, gp)
8055                     || !addr)
8056                     break;
8057                 unlock_user(*q, addr, 0);
8058             }
8059             for (gp = guest_envp, q = envp; *q;
8060                   gp += sizeof(abi_ulong), q++) {
8061                 if (get_user_ual(addr, gp)
8062                     || !addr)
8063                     break;
8064                 unlock_user(*q, addr, 0);
8065             }
8066 
8067             g_free(argp);
8068             g_free(envp);
8069         }
8070         return ret;
8071     case TARGET_NR_chdir:
8072         if (!(p = lock_user_string(arg1)))
8073             return -TARGET_EFAULT;
8074         ret = get_errno(chdir(p));
8075         unlock_user(p, arg1, 0);
8076         return ret;
8077 #ifdef TARGET_NR_time
8078     case TARGET_NR_time:
8079         {
8080             time_t host_time;
8081             ret = get_errno(time(&host_time));
8082             if (!is_error(ret)
8083                 && arg1
8084                 && put_user_sal(host_time, arg1))
8085                 return -TARGET_EFAULT;
8086         }
8087         return ret;
8088 #endif
8089 #ifdef TARGET_NR_mknod
8090     case TARGET_NR_mknod:
8091         if (!(p = lock_user_string(arg1)))
8092             return -TARGET_EFAULT;
8093         ret = get_errno(mknod(p, arg2, arg3));
8094         unlock_user(p, arg1, 0);
8095         return ret;
8096 #endif
8097 #if defined(TARGET_NR_mknodat)
8098     case TARGET_NR_mknodat:
8099         if (!(p = lock_user_string(arg2)))
8100             return -TARGET_EFAULT;
8101         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8102         unlock_user(p, arg2, 0);
8103         return ret;
8104 #endif
8105 #ifdef TARGET_NR_chmod
8106     case TARGET_NR_chmod:
8107         if (!(p = lock_user_string(arg1)))
8108             return -TARGET_EFAULT;
8109         ret = get_errno(chmod(p, arg2));
8110         unlock_user(p, arg1, 0);
8111         return ret;
8112 #endif
8113 #ifdef TARGET_NR_lseek
8114     case TARGET_NR_lseek:
8115         return get_errno(lseek(arg1, arg2, arg3));
8116 #endif
8117 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8118     /* Alpha specific */
8119     case TARGET_NR_getxpid:
8120         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8121         return get_errno(getpid());
8122 #endif
8123 #ifdef TARGET_NR_getpid
8124     case TARGET_NR_getpid:
8125         return get_errno(getpid());
8126 #endif
8127     case TARGET_NR_mount:
8128         {
8129             /* need to look at the data field */
8130             void *p2, *p3;
8131 
8132             if (arg1) {
8133                 p = lock_user_string(arg1);
8134                 if (!p) {
8135                     return -TARGET_EFAULT;
8136                 }
8137             } else {
8138                 p = NULL;
8139             }
8140 
8141             p2 = lock_user_string(arg2);
8142             if (!p2) {
8143                 if (arg1) {
8144                     unlock_user(p, arg1, 0);
8145                 }
8146                 return -TARGET_EFAULT;
8147             }
8148 
8149             if (arg3) {
8150                 p3 = lock_user_string(arg3);
8151                 if (!p3) {
8152                     if (arg1) {
8153                         unlock_user(p, arg1, 0);
8154                     }
8155                     unlock_user(p2, arg2, 0);
8156                     return -TARGET_EFAULT;
8157                 }
8158             } else {
8159                 p3 = NULL;
8160             }
8161 
8162             /* FIXME - arg5 should be locked, but it isn't clear how to
8163              * do that since it's not guaranteed to be a NULL-terminated
8164              * string.
8165              */
8166             if (!arg5) {
8167                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8168             } else {
8169                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8170             }
8171             ret = get_errno(ret);
8172 
8173             if (arg1) {
8174                 unlock_user(p, arg1, 0);
8175             }
8176             unlock_user(p2, arg2, 0);
8177             if (arg3) {
8178                 unlock_user(p3, arg3, 0);
8179             }
8180         }
8181         return ret;
8182 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8183 #if defined(TARGET_NR_umount)
8184     case TARGET_NR_umount:
8185 #endif
8186 #if defined(TARGET_NR_oldumount)
8187     case TARGET_NR_oldumount:
8188 #endif
8189         if (!(p = lock_user_string(arg1)))
8190             return -TARGET_EFAULT;
8191         ret = get_errno(umount(p));
8192         unlock_user(p, arg1, 0);
8193         return ret;
8194 #endif
8195 #ifdef TARGET_NR_stime /* not on alpha */
8196     case TARGET_NR_stime:
8197         {
8198             struct timespec ts;
8199             ts.tv_nsec = 0;
8200             if (get_user_sal(ts.tv_sec, arg1)) {
8201                 return -TARGET_EFAULT;
8202             }
8203             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8204         }
8205 #endif
8206 #ifdef TARGET_NR_alarm /* not on alpha */
8207     case TARGET_NR_alarm:
8208         return alarm(arg1);
8209 #endif
8210 #ifdef TARGET_NR_pause /* not on alpha */
8211     case TARGET_NR_pause:
8212         if (!block_signals()) {
8213             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8214         }
8215         return -TARGET_EINTR;
8216 #endif
8217 #ifdef TARGET_NR_utime
8218     case TARGET_NR_utime:
8219         {
8220             struct utimbuf tbuf, *host_tbuf;
8221             struct target_utimbuf *target_tbuf;
8222             if (arg2) {
8223                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8224                     return -TARGET_EFAULT;
8225                 tbuf.actime = tswapal(target_tbuf->actime);
8226                 tbuf.modtime = tswapal(target_tbuf->modtime);
8227                 unlock_user_struct(target_tbuf, arg2, 0);
8228                 host_tbuf = &tbuf;
8229             } else {
8230                 host_tbuf = NULL;
8231             }
8232             if (!(p = lock_user_string(arg1)))
8233                 return -TARGET_EFAULT;
8234             ret = get_errno(utime(p, host_tbuf));
8235             unlock_user(p, arg1, 0);
8236         }
8237         return ret;
8238 #endif
8239 #ifdef TARGET_NR_utimes
8240     case TARGET_NR_utimes:
8241         {
8242             struct timeval *tvp, tv[2];
8243             if (arg2) {
8244                 if (copy_from_user_timeval(&tv[0], arg2)
8245                     || copy_from_user_timeval(&tv[1],
8246                                               arg2 + sizeof(struct target_timeval)))
8247                     return -TARGET_EFAULT;
8248                 tvp = tv;
8249             } else {
8250                 tvp = NULL;
8251             }
8252             if (!(p = lock_user_string(arg1)))
8253                 return -TARGET_EFAULT;
8254             ret = get_errno(utimes(p, tvp));
8255             unlock_user(p, arg1, 0);
8256         }
8257         return ret;
8258 #endif
8259 #if defined(TARGET_NR_futimesat)
8260     case TARGET_NR_futimesat:
8261         {
8262             struct timeval *tvp, tv[2];
8263             if (arg3) {
8264                 if (copy_from_user_timeval(&tv[0], arg3)
8265                     || copy_from_user_timeval(&tv[1],
8266                                               arg3 + sizeof(struct target_timeval)))
8267                     return -TARGET_EFAULT;
8268                 tvp = tv;
8269             } else {
8270                 tvp = NULL;
8271             }
8272             if (!(p = lock_user_string(arg2))) {
8273                 return -TARGET_EFAULT;
8274             }
8275             ret = get_errno(futimesat(arg1, path(p), tvp));
8276             unlock_user(p, arg2, 0);
8277         }
8278         return ret;
8279 #endif
8280 #ifdef TARGET_NR_access
8281     case TARGET_NR_access:
8282         if (!(p = lock_user_string(arg1))) {
8283             return -TARGET_EFAULT;
8284         }
8285         ret = get_errno(access(path(p), arg2));
8286         unlock_user(p, arg1, 0);
8287         return ret;
8288 #endif
8289 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8290     case TARGET_NR_faccessat:
8291         if (!(p = lock_user_string(arg2))) {
8292             return -TARGET_EFAULT;
8293         }
8294         ret = get_errno(faccessat(arg1, p, arg3, 0));
8295         unlock_user(p, arg2, 0);
8296         return ret;
8297 #endif
8298 #ifdef TARGET_NR_nice /* not on alpha */
8299     case TARGET_NR_nice:
8300         return get_errno(nice(arg1));
8301 #endif
8302     case TARGET_NR_sync:
8303         sync();
8304         return 0;
8305 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8306     case TARGET_NR_syncfs:
8307         return get_errno(syncfs(arg1));
8308 #endif
8309     case TARGET_NR_kill:
8310         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8311 #ifdef TARGET_NR_rename
8312     case TARGET_NR_rename:
8313         {
8314             void *p2;
8315             p = lock_user_string(arg1);
8316             p2 = lock_user_string(arg2);
8317             if (!p || !p2)
8318                 ret = -TARGET_EFAULT;
8319             else
8320                 ret = get_errno(rename(p, p2));
8321             unlock_user(p2, arg2, 0);
8322             unlock_user(p, arg1, 0);
8323         }
8324         return ret;
8325 #endif
8326 #if defined(TARGET_NR_renameat)
8327     case TARGET_NR_renameat:
8328         {
8329             void *p2;
8330             p  = lock_user_string(arg2);
8331             p2 = lock_user_string(arg4);
8332             if (!p || !p2)
8333                 ret = -TARGET_EFAULT;
8334             else
8335                 ret = get_errno(renameat(arg1, p, arg3, p2));
8336             unlock_user(p2, arg4, 0);
8337             unlock_user(p, arg2, 0);
8338         }
8339         return ret;
8340 #endif
8341 #if defined(TARGET_NR_renameat2)
8342     case TARGET_NR_renameat2:
8343         {
8344             void *p2;
8345             p  = lock_user_string(arg2);
8346             p2 = lock_user_string(arg4);
8347             if (!p || !p2) {
8348                 ret = -TARGET_EFAULT;
8349             } else {
8350                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8351             }
8352             unlock_user(p2, arg4, 0);
8353             unlock_user(p, arg2, 0);
8354         }
8355         return ret;
8356 #endif
8357 #ifdef TARGET_NR_mkdir
8358     case TARGET_NR_mkdir:
8359         if (!(p = lock_user_string(arg1)))
8360             return -TARGET_EFAULT;
8361         ret = get_errno(mkdir(p, arg2));
8362         unlock_user(p, arg1, 0);
8363         return ret;
8364 #endif
8365 #if defined(TARGET_NR_mkdirat)
8366     case TARGET_NR_mkdirat:
8367         if (!(p = lock_user_string(arg2)))
8368             return -TARGET_EFAULT;
8369         ret = get_errno(mkdirat(arg1, p, arg3));
8370         unlock_user(p, arg2, 0);
8371         return ret;
8372 #endif
8373 #ifdef TARGET_NR_rmdir
8374     case TARGET_NR_rmdir:
8375         if (!(p = lock_user_string(arg1)))
8376             return -TARGET_EFAULT;
8377         ret = get_errno(rmdir(p));
8378         unlock_user(p, arg1, 0);
8379         return ret;
8380 #endif
8381     case TARGET_NR_dup:
8382         ret = get_errno(dup(arg1));
8383         if (ret >= 0) {
8384             fd_trans_dup(arg1, ret);
8385         }
8386         return ret;
8387 #ifdef TARGET_NR_pipe
8388     case TARGET_NR_pipe:
8389         return do_pipe(cpu_env, arg1, 0, 0);
8390 #endif
8391 #ifdef TARGET_NR_pipe2
8392     case TARGET_NR_pipe2:
8393         return do_pipe(cpu_env, arg1,
8394                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8395 #endif
8396     case TARGET_NR_times:
8397         {
8398             struct target_tms *tmsp;
8399             struct tms tms;
8400             ret = get_errno(times(&tms));
8401             if (arg1) {
8402                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8403                 if (!tmsp)
8404                     return -TARGET_EFAULT;
8405                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8406                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8407                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8408                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8409             }
8410             if (!is_error(ret))
8411                 ret = host_to_target_clock_t(ret);
8412         }
8413         return ret;
8414     case TARGET_NR_acct:
8415         if (arg1 == 0) {
8416             ret = get_errno(acct(NULL));
8417         } else {
8418             if (!(p = lock_user_string(arg1))) {
8419                 return -TARGET_EFAULT;
8420             }
8421             ret = get_errno(acct(path(p)));
8422             unlock_user(p, arg1, 0);
8423         }
8424         return ret;
8425 #ifdef TARGET_NR_umount2
8426     case TARGET_NR_umount2:
8427         if (!(p = lock_user_string(arg1)))
8428             return -TARGET_EFAULT;
8429         ret = get_errno(umount2(p, arg2));
8430         unlock_user(p, arg1, 0);
8431         return ret;
8432 #endif
8433     case TARGET_NR_ioctl:
8434         return do_ioctl(arg1, arg2, arg3);
8435 #ifdef TARGET_NR_fcntl
8436     case TARGET_NR_fcntl:
8437         return do_fcntl(arg1, arg2, arg3);
8438 #endif
8439     case TARGET_NR_setpgid:
8440         return get_errno(setpgid(arg1, arg2));
8441     case TARGET_NR_umask:
8442         return get_errno(umask(arg1));
8443     case TARGET_NR_chroot:
8444         if (!(p = lock_user_string(arg1)))
8445             return -TARGET_EFAULT;
8446         ret = get_errno(chroot(p));
8447         unlock_user(p, arg1, 0);
8448         return ret;
8449 #ifdef TARGET_NR_dup2
8450     case TARGET_NR_dup2:
8451         ret = get_errno(dup2(arg1, arg2));
8452         if (ret >= 0) {
8453             fd_trans_dup(arg1, arg2);
8454         }
8455         return ret;
8456 #endif
8457 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8458     case TARGET_NR_dup3:
8459     {
8460         int host_flags;
8461 
8462         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8463             return -EINVAL;
8464         }
8465         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8466         ret = get_errno(dup3(arg1, arg2, host_flags));
8467         if (ret >= 0) {
8468             fd_trans_dup(arg1, arg2);
8469         }
8470         return ret;
8471     }
8472 #endif
8473 #ifdef TARGET_NR_getppid /* not on alpha */
8474     case TARGET_NR_getppid:
8475         return get_errno(getppid());
8476 #endif
8477 #ifdef TARGET_NR_getpgrp
8478     case TARGET_NR_getpgrp:
8479         return get_errno(getpgrp());
8480 #endif
8481     case TARGET_NR_setsid:
8482         return get_errno(setsid());
8483 #ifdef TARGET_NR_sigaction
8484     case TARGET_NR_sigaction:
8485         {
8486 #if defined(TARGET_ALPHA)
8487             struct target_sigaction act, oact, *pact = 0;
8488             struct target_old_sigaction *old_act;
8489             if (arg2) {
8490                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8491                     return -TARGET_EFAULT;
8492                 act._sa_handler = old_act->_sa_handler;
8493                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8494                 act.sa_flags = old_act->sa_flags;
8495                 act.sa_restorer = 0;
8496                 unlock_user_struct(old_act, arg2, 0);
8497                 pact = &act;
8498             }
8499             ret = get_errno(do_sigaction(arg1, pact, &oact));
8500             if (!is_error(ret) && arg3) {
8501                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8502                     return -TARGET_EFAULT;
8503                 old_act->_sa_handler = oact._sa_handler;
8504                 old_act->sa_mask = oact.sa_mask.sig[0];
8505                 old_act->sa_flags = oact.sa_flags;
8506                 unlock_user_struct(old_act, arg3, 1);
8507             }
8508 #elif defined(TARGET_MIPS)
8509             struct target_sigaction act, oact, *pact, *old_act;
8510 
8511             if (arg2) {
8512                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8513                     return -TARGET_EFAULT;
8514                 act._sa_handler = old_act->_sa_handler;
8515                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8516                 act.sa_flags = old_act->sa_flags;
8517                 unlock_user_struct(old_act, arg2, 0);
8518                 pact = &act;
8519             } else {
8520                 pact = NULL;
8521             }
8522 
8523             ret = get_errno(do_sigaction(arg1, pact, &oact));
8524 
8525             if (!is_error(ret) && arg3) {
8526                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8527                     return -TARGET_EFAULT;
8528                 old_act->_sa_handler = oact._sa_handler;
8529                 old_act->sa_flags = oact.sa_flags;
8530                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8531                 old_act->sa_mask.sig[1] = 0;
8532                 old_act->sa_mask.sig[2] = 0;
8533                 old_act->sa_mask.sig[3] = 0;
8534                 unlock_user_struct(old_act, arg3, 1);
8535             }
8536 #else
8537             struct target_old_sigaction *old_act;
8538             struct target_sigaction act, oact, *pact;
8539             if (arg2) {
8540                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8541                     return -TARGET_EFAULT;
8542                 act._sa_handler = old_act->_sa_handler;
8543                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8544                 act.sa_flags = old_act->sa_flags;
8545                 act.sa_restorer = old_act->sa_restorer;
8546 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8547                 act.ka_restorer = 0;
8548 #endif
8549                 unlock_user_struct(old_act, arg2, 0);
8550                 pact = &act;
8551             } else {
8552                 pact = NULL;
8553             }
8554             ret = get_errno(do_sigaction(arg1, pact, &oact));
8555             if (!is_error(ret) && arg3) {
8556                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8557                     return -TARGET_EFAULT;
8558                 old_act->_sa_handler = oact._sa_handler;
8559                 old_act->sa_mask = oact.sa_mask.sig[0];
8560                 old_act->sa_flags = oact.sa_flags;
8561                 old_act->sa_restorer = oact.sa_restorer;
8562                 unlock_user_struct(old_act, arg3, 1);
8563             }
8564 #endif
8565         }
8566         return ret;
8567 #endif
8568     case TARGET_NR_rt_sigaction:
8569         {
8570 #if defined(TARGET_ALPHA)
8571             /* For Alpha and SPARC this is a 5-argument syscall, with
8572              * a 'restorer' parameter which must be copied into the
8573              * sa_restorer field of the sigaction struct.
8574              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8575              * and arg5 is the sigsetsize.
8576              * Alpha also has a separate rt_sigaction struct that it uses
8577              * here; SPARC uses the usual sigaction struct.
8578              */
8579             struct target_rt_sigaction *rt_act;
8580             struct target_sigaction act, oact, *pact = 0;
8581 
8582             if (arg4 != sizeof(target_sigset_t)) {
8583                 return -TARGET_EINVAL;
8584             }
8585             if (arg2) {
8586                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8587                     return -TARGET_EFAULT;
8588                 act._sa_handler = rt_act->_sa_handler;
8589                 act.sa_mask = rt_act->sa_mask;
8590                 act.sa_flags = rt_act->sa_flags;
8591                 act.sa_restorer = arg5;
8592                 unlock_user_struct(rt_act, arg2, 0);
8593                 pact = &act;
8594             }
8595             ret = get_errno(do_sigaction(arg1, pact, &oact));
8596             if (!is_error(ret) && arg3) {
8597                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8598                     return -TARGET_EFAULT;
8599                 rt_act->_sa_handler = oact._sa_handler;
8600                 rt_act->sa_mask = oact.sa_mask;
8601                 rt_act->sa_flags = oact.sa_flags;
8602                 unlock_user_struct(rt_act, arg3, 1);
8603             }
8604 #else
8605 #ifdef TARGET_SPARC
8606             target_ulong restorer = arg4;
8607             target_ulong sigsetsize = arg5;
8608 #else
8609             target_ulong sigsetsize = arg4;
8610 #endif
8611             struct target_sigaction *act;
8612             struct target_sigaction *oact;
8613 
8614             if (sigsetsize != sizeof(target_sigset_t)) {
8615                 return -TARGET_EINVAL;
8616             }
8617             if (arg2) {
8618                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8619                     return -TARGET_EFAULT;
8620                 }
8621 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8622                 act->ka_restorer = restorer;
8623 #endif
8624             } else {
8625                 act = NULL;
8626             }
8627             if (arg3) {
8628                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8629                     ret = -TARGET_EFAULT;
8630                     goto rt_sigaction_fail;
8631                 }
8632             } else
8633                 oact = NULL;
8634             ret = get_errno(do_sigaction(arg1, act, oact));
8635         rt_sigaction_fail:
8636             if (act)
8637                 unlock_user_struct(act, arg2, 0);
8638             if (oact)
8639                 unlock_user_struct(oact, arg3, 1);
8640 #endif
8641         }
8642         return ret;
8643 #ifdef TARGET_NR_sgetmask /* not on alpha */
8644     case TARGET_NR_sgetmask:
8645         {
8646             sigset_t cur_set;
8647             abi_ulong target_set;
8648             ret = do_sigprocmask(0, NULL, &cur_set);
8649             if (!ret) {
8650                 host_to_target_old_sigset(&target_set, &cur_set);
8651                 ret = target_set;
8652             }
8653         }
8654         return ret;
8655 #endif
8656 #ifdef TARGET_NR_ssetmask /* not on alpha */
8657     case TARGET_NR_ssetmask:
8658         {
8659             sigset_t set, oset;
8660             abi_ulong target_set = arg1;
8661             target_to_host_old_sigset(&set, &target_set);
8662             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8663             if (!ret) {
8664                 host_to_target_old_sigset(&target_set, &oset);
8665                 ret = target_set;
8666             }
8667         }
8668         return ret;
8669 #endif
8670 #ifdef TARGET_NR_sigprocmask
8671     case TARGET_NR_sigprocmask:
8672         {
8673 #if defined(TARGET_ALPHA)
8674             sigset_t set, oldset;
8675             abi_ulong mask;
8676             int how;
8677 
8678             switch (arg1) {
8679             case TARGET_SIG_BLOCK:
8680                 how = SIG_BLOCK;
8681                 break;
8682             case TARGET_SIG_UNBLOCK:
8683                 how = SIG_UNBLOCK;
8684                 break;
8685             case TARGET_SIG_SETMASK:
8686                 how = SIG_SETMASK;
8687                 break;
8688             default:
8689                 return -TARGET_EINVAL;
8690             }
8691             mask = arg2;
8692             target_to_host_old_sigset(&set, &mask);
8693 
8694             ret = do_sigprocmask(how, &set, &oldset);
8695             if (!is_error(ret)) {
8696                 host_to_target_old_sigset(&mask, &oldset);
8697                 ret = mask;
8698                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8699             }
8700 #else
8701             sigset_t set, oldset, *set_ptr;
8702             int how;
8703 
8704             if (arg2) {
8705                 switch (arg1) {
8706                 case TARGET_SIG_BLOCK:
8707                     how = SIG_BLOCK;
8708                     break;
8709                 case TARGET_SIG_UNBLOCK:
8710                     how = SIG_UNBLOCK;
8711                     break;
8712                 case TARGET_SIG_SETMASK:
8713                     how = SIG_SETMASK;
8714                     break;
8715                 default:
8716                     return -TARGET_EINVAL;
8717                 }
8718                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8719                     return -TARGET_EFAULT;
8720                 target_to_host_old_sigset(&set, p);
8721                 unlock_user(p, arg2, 0);
8722                 set_ptr = &set;
8723             } else {
8724                 how = 0;
8725                 set_ptr = NULL;
8726             }
8727             ret = do_sigprocmask(how, set_ptr, &oldset);
8728             if (!is_error(ret) && arg3) {
8729                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8730                     return -TARGET_EFAULT;
8731                 host_to_target_old_sigset(p, &oldset);
8732                 unlock_user(p, arg3, sizeof(target_sigset_t));
8733             }
8734 #endif
8735         }
8736         return ret;
8737 #endif
8738     case TARGET_NR_rt_sigprocmask:
8739         {
8740             int how = arg1;
8741             sigset_t set, oldset, *set_ptr;
8742 
8743             if (arg4 != sizeof(target_sigset_t)) {
8744                 return -TARGET_EINVAL;
8745             }
8746 
8747             if (arg2) {
8748                 switch(how) {
8749                 case TARGET_SIG_BLOCK:
8750                     how = SIG_BLOCK;
8751                     break;
8752                 case TARGET_SIG_UNBLOCK:
8753                     how = SIG_UNBLOCK;
8754                     break;
8755                 case TARGET_SIG_SETMASK:
8756                     how = SIG_SETMASK;
8757                     break;
8758                 default:
8759                     return -TARGET_EINVAL;
8760                 }
8761                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8762                     return -TARGET_EFAULT;
8763                 target_to_host_sigset(&set, p);
8764                 unlock_user(p, arg2, 0);
8765                 set_ptr = &set;
8766             } else {
8767                 how = 0;
8768                 set_ptr = NULL;
8769             }
8770             ret = do_sigprocmask(how, set_ptr, &oldset);
8771             if (!is_error(ret) && arg3) {
8772                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8773                     return -TARGET_EFAULT;
8774                 host_to_target_sigset(p, &oldset);
8775                 unlock_user(p, arg3, sizeof(target_sigset_t));
8776             }
8777         }
8778         return ret;
8779 #ifdef TARGET_NR_sigpending
8780     case TARGET_NR_sigpending:
8781         {
8782             sigset_t set;
8783             ret = get_errno(sigpending(&set));
8784             if (!is_error(ret)) {
8785                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8786                     return -TARGET_EFAULT;
8787                 host_to_target_old_sigset(p, &set);
8788                 unlock_user(p, arg1, sizeof(target_sigset_t));
8789             }
8790         }
8791         return ret;
8792 #endif
8793     case TARGET_NR_rt_sigpending:
8794         {
8795             sigset_t set;
8796 
8797             /* Yes, this check is >, not != like most. We follow the kernel's
8798              * logic and it does it like this because it implements
8799              * NR_sigpending through the same code path, and in that case
8800              * the old_sigset_t is smaller in size.
8801              */
8802             if (arg2 > sizeof(target_sigset_t)) {
8803                 return -TARGET_EINVAL;
8804             }
8805 
8806             ret = get_errno(sigpending(&set));
8807             if (!is_error(ret)) {
8808                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8809                     return -TARGET_EFAULT;
8810                 host_to_target_sigset(p, &set);
8811                 unlock_user(p, arg1, sizeof(target_sigset_t));
8812             }
8813         }
8814         return ret;
8815 #ifdef TARGET_NR_sigsuspend
8816     case TARGET_NR_sigsuspend:
8817         {
8818             TaskState *ts = cpu->opaque;
8819 #if defined(TARGET_ALPHA)
8820             abi_ulong mask = arg1;
8821             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8822 #else
8823             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8824                 return -TARGET_EFAULT;
8825             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8826             unlock_user(p, arg1, 0);
8827 #endif
8828             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8829                                                SIGSET_T_SIZE));
8830             if (ret != -TARGET_ERESTARTSYS) {
8831                 ts->in_sigsuspend = 1;
8832             }
8833         }
8834         return ret;
8835 #endif
8836     case TARGET_NR_rt_sigsuspend:
8837         {
8838             TaskState *ts = cpu->opaque;
8839 
8840             if (arg2 != sizeof(target_sigset_t)) {
8841                 return -TARGET_EINVAL;
8842             }
8843             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8844                 return -TARGET_EFAULT;
8845             target_to_host_sigset(&ts->sigsuspend_mask, p);
8846             unlock_user(p, arg1, 0);
8847             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8848                                                SIGSET_T_SIZE));
8849             if (ret != -TARGET_ERESTARTSYS) {
8850                 ts->in_sigsuspend = 1;
8851             }
8852         }
8853         return ret;
8854 #ifdef TARGET_NR_rt_sigtimedwait
8855     case TARGET_NR_rt_sigtimedwait:
8856         {
8857             sigset_t set;
8858             struct timespec uts, *puts;
8859             siginfo_t uinfo;
8860 
8861             if (arg4 != sizeof(target_sigset_t)) {
8862                 return -TARGET_EINVAL;
8863             }
8864 
8865             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8866                 return -TARGET_EFAULT;
8867             target_to_host_sigset(&set, p);
8868             unlock_user(p, arg1, 0);
8869             if (arg3) {
8870                 puts = &uts;
8871                 target_to_host_timespec(puts, arg3);
8872             } else {
8873                 puts = NULL;
8874             }
8875             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8876                                                  SIGSET_T_SIZE));
8877             if (!is_error(ret)) {
8878                 if (arg2) {
8879                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8880                                   0);
8881                     if (!p) {
8882                         return -TARGET_EFAULT;
8883                     }
8884                     host_to_target_siginfo(p, &uinfo);
8885                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8886                 }
8887                 ret = host_to_target_signal(ret);
8888             }
8889         }
8890         return ret;
8891 #endif
8892     case TARGET_NR_rt_sigqueueinfo:
8893         {
8894             siginfo_t uinfo;
8895 
8896             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8897             if (!p) {
8898                 return -TARGET_EFAULT;
8899             }
8900             target_to_host_siginfo(&uinfo, p);
8901             unlock_user(p, arg3, 0);
8902             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8903         }
8904         return ret;
8905     case TARGET_NR_rt_tgsigqueueinfo:
8906         {
8907             siginfo_t uinfo;
8908 
8909             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8910             if (!p) {
8911                 return -TARGET_EFAULT;
8912             }
8913             target_to_host_siginfo(&uinfo, p);
8914             unlock_user(p, arg4, 0);
8915             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8916         }
8917         return ret;
8918 #ifdef TARGET_NR_sigreturn
8919     case TARGET_NR_sigreturn:
8920         if (block_signals()) {
8921             return -TARGET_ERESTARTSYS;
8922         }
8923         return do_sigreturn(cpu_env);
8924 #endif
8925     case TARGET_NR_rt_sigreturn:
8926         if (block_signals()) {
8927             return -TARGET_ERESTARTSYS;
8928         }
8929         return do_rt_sigreturn(cpu_env);
8930     case TARGET_NR_sethostname:
8931         if (!(p = lock_user_string(arg1)))
8932             return -TARGET_EFAULT;
8933         ret = get_errno(sethostname(p, arg2));
8934         unlock_user(p, arg1, 0);
8935         return ret;
8936 #ifdef TARGET_NR_setrlimit
8937     case TARGET_NR_setrlimit:
8938         {
8939             int resource = target_to_host_resource(arg1);
8940             struct target_rlimit *target_rlim;
8941             struct rlimit rlim;
8942             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8943                 return -TARGET_EFAULT;
8944             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8945             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8946             unlock_user_struct(target_rlim, arg2, 0);
8947             /*
8948              * If we just passed through resource limit settings for memory then
8949              * they would also apply to QEMU's own allocations, and QEMU will
8950              * crash or hang or die if its allocations fail. Ideally we would
8951              * track the guest allocations in QEMU and apply the limits ourselves.
8952              * For now, just tell the guest the call succeeded but don't actually
8953              * limit anything.
8954              */
8955             if (resource != RLIMIT_AS &&
8956                 resource != RLIMIT_DATA &&
8957                 resource != RLIMIT_STACK) {
8958                 return get_errno(setrlimit(resource, &rlim));
8959             } else {
8960                 return 0;
8961             }
8962         }
8963 #endif
8964 #ifdef TARGET_NR_getrlimit
8965     case TARGET_NR_getrlimit:
8966         {
8967             int resource = target_to_host_resource(arg1);
8968             struct target_rlimit *target_rlim;
8969             struct rlimit rlim;
8970 
8971             ret = get_errno(getrlimit(resource, &rlim));
8972             if (!is_error(ret)) {
8973                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8974                     return -TARGET_EFAULT;
8975                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8976                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8977                 unlock_user_struct(target_rlim, arg2, 1);
8978             }
8979         }
8980         return ret;
8981 #endif
8982     case TARGET_NR_getrusage:
8983         {
8984             struct rusage rusage;
8985             ret = get_errno(getrusage(arg1, &rusage));
8986             if (!is_error(ret)) {
8987                 ret = host_to_target_rusage(arg2, &rusage);
8988             }
8989         }
8990         return ret;
8991 #if defined(TARGET_NR_gettimeofday)
8992     case TARGET_NR_gettimeofday:
8993         {
8994             struct timeval tv;
8995             struct timezone tz;
8996 
8997             ret = get_errno(gettimeofday(&tv, &tz));
8998             if (!is_error(ret)) {
8999                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9000                     return -TARGET_EFAULT;
9001                 }
9002                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9003                     return -TARGET_EFAULT;
9004                 }
9005             }
9006         }
9007         return ret;
9008 #endif
9009 #if defined(TARGET_NR_settimeofday)
9010     case TARGET_NR_settimeofday:
9011         {
9012             struct timeval tv, *ptv = NULL;
9013             struct timezone tz, *ptz = NULL;
9014 
9015             if (arg1) {
9016                 if (copy_from_user_timeval(&tv, arg1)) {
9017                     return -TARGET_EFAULT;
9018                 }
9019                 ptv = &tv;
9020             }
9021 
9022             if (arg2) {
9023                 if (copy_from_user_timezone(&tz, arg2)) {
9024                     return -TARGET_EFAULT;
9025                 }
9026                 ptz = &tz;
9027             }
9028 
9029             return get_errno(settimeofday(ptv, ptz));
9030         }
9031 #endif
9032 #if defined(TARGET_NR_select)
9033     case TARGET_NR_select:
9034 #if defined(TARGET_WANT_NI_OLD_SELECT)
9035         /* some architectures used to have old_select here
9036          * but now return ENOSYS for it.
9037          */
9038         ret = -TARGET_ENOSYS;
9039 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9040         ret = do_old_select(arg1);
9041 #else
9042         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9043 #endif
9044         return ret;
9045 #endif
9046 #ifdef TARGET_NR_pselect6
9047     case TARGET_NR_pselect6:
9048         {
9049             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9050             fd_set rfds, wfds, efds;
9051             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9052             struct timespec ts, *ts_ptr;
9053 
9054             /*
9055              * The 6th arg is actually two args smashed together,
9056              * so we cannot use the C library.
9057              */
9058             sigset_t set;
9059             struct {
9060                 sigset_t *set;
9061                 size_t size;
9062             } sig, *sig_ptr;
9063 
9064             abi_ulong arg_sigset, arg_sigsize, *arg7;
9065             target_sigset_t *target_sigset;
9066 
9067             n = arg1;
9068             rfd_addr = arg2;
9069             wfd_addr = arg3;
9070             efd_addr = arg4;
9071             ts_addr = arg5;
9072 
9073             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9074             if (ret) {
9075                 return ret;
9076             }
9077             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9078             if (ret) {
9079                 return ret;
9080             }
9081             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9082             if (ret) {
9083                 return ret;
9084             }
9085 
9086             /*
9087              * This takes a timespec, and not a timeval, so we cannot
9088              * use the do_select() helper ...
9089              */
9090             if (ts_addr) {
9091                 if (target_to_host_timespec(&ts, ts_addr)) {
9092                     return -TARGET_EFAULT;
9093                 }
9094                 ts_ptr = &ts;
9095             } else {
9096                 ts_ptr = NULL;
9097             }
9098 
9099             /* Extract the two packed args for the sigset */
9100             if (arg6) {
9101                 sig_ptr = &sig;
9102                 sig.size = SIGSET_T_SIZE;
9103 
9104                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9105                 if (!arg7) {
9106                     return -TARGET_EFAULT;
9107                 }
9108                 arg_sigset = tswapal(arg7[0]);
9109                 arg_sigsize = tswapal(arg7[1]);
9110                 unlock_user(arg7, arg6, 0);
9111 
9112                 if (arg_sigset) {
9113                     sig.set = &set;
9114                     if (arg_sigsize != sizeof(*target_sigset)) {
9115                         /* Like the kernel, we enforce correct size sigsets */
9116                         return -TARGET_EINVAL;
9117                     }
9118                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9119                                               sizeof(*target_sigset), 1);
9120                     if (!target_sigset) {
9121                         return -TARGET_EFAULT;
9122                     }
9123                     target_to_host_sigset(&set, target_sigset);
9124                     unlock_user(target_sigset, arg_sigset, 0);
9125                 } else {
9126                     sig.set = NULL;
9127                 }
9128             } else {
9129                 sig_ptr = NULL;
9130             }
9131 
9132             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9133                                           ts_ptr, sig_ptr));
9134 
9135             if (!is_error(ret)) {
9136                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9137                     return -TARGET_EFAULT;
9138                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9139                     return -TARGET_EFAULT;
9140                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9141                     return -TARGET_EFAULT;
9142 
9143                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9144                     return -TARGET_EFAULT;
9145             }
9146         }
9147         return ret;
9148 #endif
9149 #ifdef TARGET_NR_symlink
9150     case TARGET_NR_symlink:
9151         {
9152             void *p2;
9153             p = lock_user_string(arg1);
9154             p2 = lock_user_string(arg2);
9155             if (!p || !p2)
9156                 ret = -TARGET_EFAULT;
9157             else
9158                 ret = get_errno(symlink(p, p2));
9159             unlock_user(p2, arg2, 0);
9160             unlock_user(p, arg1, 0);
9161         }
9162         return ret;
9163 #endif
9164 #if defined(TARGET_NR_symlinkat)
9165     case TARGET_NR_symlinkat:
9166         {
9167             void *p2;
9168             p  = lock_user_string(arg1);
9169             p2 = lock_user_string(arg3);
9170             if (!p || !p2)
9171                 ret = -TARGET_EFAULT;
9172             else
9173                 ret = get_errno(symlinkat(p, arg2, p2));
9174             unlock_user(p2, arg3, 0);
9175             unlock_user(p, arg1, 0);
9176         }
9177         return ret;
9178 #endif
9179 #ifdef TARGET_NR_readlink
9180     case TARGET_NR_readlink:
9181         {
9182             void *p2;
9183             p = lock_user_string(arg1);
9184             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9185             if (!p || !p2) {
9186                 ret = -TARGET_EFAULT;
9187             } else if (!arg3) {
9188                 /* Short circuit this for the magic exe check. */
9189                 ret = -TARGET_EINVAL;
9190             } else if (is_proc_myself((const char *)p, "exe")) {
9191                 char real[PATH_MAX], *temp;
9192                 temp = realpath(exec_path, real);
9193                 /* Return value is # of bytes that we wrote to the buffer. */
9194                 if (temp == NULL) {
9195                     ret = get_errno(-1);
9196                 } else {
9197                     /* Don't worry about sign mismatch as earlier mapping
9198                      * logic would have thrown a bad address error. */
9199                     ret = MIN(strlen(real), arg3);
9200                     /* We cannot NUL terminate the string. */
9201                     memcpy(p2, real, ret);
9202                 }
9203             } else {
9204                 ret = get_errno(readlink(path(p), p2, arg3));
9205             }
9206             unlock_user(p2, arg2, ret);
9207             unlock_user(p, arg1, 0);
9208         }
9209         return ret;
9210 #endif
9211 #if defined(TARGET_NR_readlinkat)
9212     case TARGET_NR_readlinkat:
9213         {
9214             void *p2;
9215             p  = lock_user_string(arg2);
9216             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9217             if (!p || !p2) {
9218                 ret = -TARGET_EFAULT;
9219             } else if (is_proc_myself((const char *)p, "exe")) {
9220                 char real[PATH_MAX], *temp;
9221                 temp = realpath(exec_path, real);
9222                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9223                 snprintf((char *)p2, arg4, "%s", real);
9224             } else {
9225                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9226             }
9227             unlock_user(p2, arg3, ret);
9228             unlock_user(p, arg2, 0);
9229         }
9230         return ret;
9231 #endif
9232 #ifdef TARGET_NR_swapon
9233     case TARGET_NR_swapon:
9234         if (!(p = lock_user_string(arg1)))
9235             return -TARGET_EFAULT;
9236         ret = get_errno(swapon(p, arg2));
9237         unlock_user(p, arg1, 0);
9238         return ret;
9239 #endif
9240     case TARGET_NR_reboot:
9241         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9242            /* arg4 is only used for RESTART2 and must be ignored in all other cases */
9243            p = lock_user_string(arg4);
9244            if (!p) {
9245                return -TARGET_EFAULT;
9246            }
9247            ret = get_errno(reboot(arg1, arg2, arg3, p));
9248            unlock_user(p, arg4, 0);
9249         } else {
9250            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9251         }
9252         return ret;
9253 #ifdef TARGET_NR_mmap
9254     case TARGET_NR_mmap:
9255 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9256     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9257     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9258     || defined(TARGET_S390X)
9259         {
9260             abi_ulong *v;
9261             abi_ulong v1, v2, v3, v4, v5, v6;
9262             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9263                 return -TARGET_EFAULT;
9264             v1 = tswapal(v[0]);
9265             v2 = tswapal(v[1]);
9266             v3 = tswapal(v[2]);
9267             v4 = tswapal(v[3]);
9268             v5 = tswapal(v[4]);
9269             v6 = tswapal(v[5]);
9270             unlock_user(v, arg1, 0);
9271             ret = get_errno(target_mmap(v1, v2, v3,
9272                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9273                                         v5, v6));
9274         }
9275 #else
9276         ret = get_errno(target_mmap(arg1, arg2, arg3,
9277                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9278                                     arg5,
9279                                     arg6));
9280 #endif
9281         return ret;
9282 #endif
9283 #ifdef TARGET_NR_mmap2
9284     case TARGET_NR_mmap2:
9285 #ifndef MMAP_SHIFT
9286 #define MMAP_SHIFT 12
9287 #endif
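             /* The mmap2 ABI passes the file offset in units of
              * 1 << MMAP_SHIFT bytes (4096 by default), so shift it
              * into a byte offset for target_mmap().
              */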
9288         ret = target_mmap(arg1, arg2, arg3,
9289                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9290                           arg5, arg6 << MMAP_SHIFT);
9291         return get_errno(ret);
9292 #endif
9293     case TARGET_NR_munmap:
9294         return get_errno(target_munmap(arg1, arg2));
9295     case TARGET_NR_mprotect:
9296         {
9297             TaskState *ts = cpu->opaque;
9298             /* Special hack to detect libc making the stack executable.  */
9299             if ((arg3 & PROT_GROWSDOWN)
9300                 && arg1 >= ts->info->stack_limit
9301                 && arg1 <= ts->info->start_stack) {
9302                 arg3 &= ~PROT_GROWSDOWN;
9303                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9304                 arg1 = ts->info->stack_limit;
9305             }
9306         }
9307         return get_errno(target_mprotect(arg1, arg2, arg3));
9308 #ifdef TARGET_NR_mremap
9309     case TARGET_NR_mremap:
9310         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9311 #endif
9312         /* ??? msync/mlock/munlock are broken for softmmu.  */
9313 #ifdef TARGET_NR_msync
9314     case TARGET_NR_msync:
9315         return get_errno(msync(g2h(arg1), arg2, arg3));
9316 #endif
9317 #ifdef TARGET_NR_mlock
9318     case TARGET_NR_mlock:
9319         return get_errno(mlock(g2h(arg1), arg2));
9320 #endif
9321 #ifdef TARGET_NR_munlock
9322     case TARGET_NR_munlock:
9323         return get_errno(munlock(g2h(arg1), arg2));
9324 #endif
9325 #ifdef TARGET_NR_mlockall
9326     case TARGET_NR_mlockall:
9327         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9328 #endif
9329 #ifdef TARGET_NR_munlockall
9330     case TARGET_NR_munlockall:
9331         return get_errno(munlockall());
9332 #endif
9333 #ifdef TARGET_NR_truncate
9334     case TARGET_NR_truncate:
9335         if (!(p = lock_user_string(arg1)))
9336             return -TARGET_EFAULT;
9337         ret = get_errno(truncate(p, arg2));
9338         unlock_user(p, arg1, 0);
9339         return ret;
9340 #endif
9341 #ifdef TARGET_NR_ftruncate
9342     case TARGET_NR_ftruncate:
9343         return get_errno(ftruncate(arg1, arg2));
9344 #endif
9345     case TARGET_NR_fchmod:
9346         return get_errno(fchmod(arg1, arg2));
9347 #if defined(TARGET_NR_fchmodat)
9348     case TARGET_NR_fchmodat:
9349         if (!(p = lock_user_string(arg2)))
9350             return -TARGET_EFAULT;
9351         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9352         unlock_user(p, arg2, 0);
9353         return ret;
9354 #endif
9355     case TARGET_NR_getpriority:
9356         /* Note that negative values are valid for getpriority, so we must
9357            differentiate based on errno settings.  */
9358         errno = 0;
9359         ret = getpriority(arg1, arg2);
9360         if (ret == -1 && errno != 0) {
9361             return -host_to_target_errno(errno);
9362         }
9363 #ifdef TARGET_ALPHA
9364         /* Return value is the unbiased priority.  Signal no error.  */
9365         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9366 #else
9367         /* Return value is a biased priority to avoid negative numbers.  */
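             /* This mirrors the raw kernel ABI: sys_getpriority() returns
              * 20 - nice (range 1..40) and the guest's C library is expected
              * to undo the bias.
              */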
9368         ret = 20 - ret;
9369 #endif
9370         return ret;
9371     case TARGET_NR_setpriority:
9372         return get_errno(setpriority(arg1, arg2, arg3));
9373 #ifdef TARGET_NR_statfs
9374     case TARGET_NR_statfs:
9375         if (!(p = lock_user_string(arg1))) {
9376             return -TARGET_EFAULT;
9377         }
9378         ret = get_errno(statfs(path(p), &stfs));
9379         unlock_user(p, arg1, 0);
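             /* TARGET_NR_fstatfs branches here via goto with stfs already
              * filled in, so the host-to-target conversion below is shared.
              */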
9380     convert_statfs:
9381         if (!is_error(ret)) {
9382             struct target_statfs *target_stfs;
9383 
9384             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9385                 return -TARGET_EFAULT;
9386             __put_user(stfs.f_type, &target_stfs->f_type);
9387             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9388             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9389             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9390             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9391             __put_user(stfs.f_files, &target_stfs->f_files);
9392             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9393             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9394             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9395             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9396             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9397 #ifdef _STATFS_F_FLAGS
9398             __put_user(stfs.f_flags, &target_stfs->f_flags);
9399 #else
9400             __put_user(0, &target_stfs->f_flags);
9401 #endif
9402             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9403             unlock_user_struct(target_stfs, arg2, 1);
9404         }
9405         return ret;
9406 #endif
9407 #ifdef TARGET_NR_fstatfs
9408     case TARGET_NR_fstatfs:
9409         ret = get_errno(fstatfs(arg1, &stfs));
9410         goto convert_statfs;
9411 #endif
9412 #ifdef TARGET_NR_statfs64
9413     case TARGET_NR_statfs64:
9414         if (!(p = lock_user_string(arg1))) {
9415             return -TARGET_EFAULT;
9416         }
9417         ret = get_errno(statfs(path(p), &stfs));
9418         unlock_user(p, arg1, 0);
9419     convert_statfs64:
9420         if (!is_error(ret)) {
9421             struct target_statfs64 *target_stfs;
9422 
9423             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9424                 return -TARGET_EFAULT;
9425             __put_user(stfs.f_type, &target_stfs->f_type);
9426             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9427             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9428             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9429             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9430             __put_user(stfs.f_files, &target_stfs->f_files);
9431             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9432             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9433             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9434             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9435             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9436             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9437             unlock_user_struct(target_stfs, arg3, 1);
9438         }
9439         return ret;
9440     case TARGET_NR_fstatfs64:
9441         ret = get_errno(fstatfs(arg1, &stfs));
9442         goto convert_statfs64;
9443 #endif
9444 #ifdef TARGET_NR_socketcall
9445     case TARGET_NR_socketcall:
9446         return do_socketcall(arg1, arg2);
9447 #endif
9448 #ifdef TARGET_NR_accept
9449     case TARGET_NR_accept:
9450         return do_accept4(arg1, arg2, arg3, 0);
9451 #endif
9452 #ifdef TARGET_NR_accept4
9453     case TARGET_NR_accept4:
9454         return do_accept4(arg1, arg2, arg3, arg4);
9455 #endif
9456 #ifdef TARGET_NR_bind
9457     case TARGET_NR_bind:
9458         return do_bind(arg1, arg2, arg3);
9459 #endif
9460 #ifdef TARGET_NR_connect
9461     case TARGET_NR_connect:
9462         return do_connect(arg1, arg2, arg3);
9463 #endif
9464 #ifdef TARGET_NR_getpeername
9465     case TARGET_NR_getpeername:
9466         return do_getpeername(arg1, arg2, arg3);
9467 #endif
9468 #ifdef TARGET_NR_getsockname
9469     case TARGET_NR_getsockname:
9470         return do_getsockname(arg1, arg2, arg3);
9471 #endif
9472 #ifdef TARGET_NR_getsockopt
9473     case TARGET_NR_getsockopt:
9474         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9475 #endif
9476 #ifdef TARGET_NR_listen
9477     case TARGET_NR_listen:
9478         return get_errno(listen(arg1, arg2));
9479 #endif
9480 #ifdef TARGET_NR_recv
9481     case TARGET_NR_recv:
9482         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9483 #endif
9484 #ifdef TARGET_NR_recvfrom
9485     case TARGET_NR_recvfrom:
9486         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9487 #endif
9488 #ifdef TARGET_NR_recvmsg
9489     case TARGET_NR_recvmsg:
9490         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9491 #endif
9492 #ifdef TARGET_NR_send
9493     case TARGET_NR_send:
9494         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9495 #endif
9496 #ifdef TARGET_NR_sendmsg
9497     case TARGET_NR_sendmsg:
9498         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9499 #endif
9500 #ifdef TARGET_NR_sendmmsg
9501     case TARGET_NR_sendmmsg:
9502         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9503 #endif
9504 #ifdef TARGET_NR_recvmmsg
9505     case TARGET_NR_recvmmsg:
9506         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9507 #endif
9508 #ifdef TARGET_NR_sendto
9509     case TARGET_NR_sendto:
9510         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9511 #endif
9512 #ifdef TARGET_NR_shutdown
9513     case TARGET_NR_shutdown:
9514         return get_errno(shutdown(arg1, arg2));
9515 #endif
9516 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9517     case TARGET_NR_getrandom:
9518         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9519         if (!p) {
9520             return -TARGET_EFAULT;
9521         }
9522         ret = get_errno(getrandom(p, arg2, arg3));
9523         unlock_user(p, arg1, ret);
9524         return ret;
9525 #endif
9526 #ifdef TARGET_NR_socket
9527     case TARGET_NR_socket:
9528         return do_socket(arg1, arg2, arg3);
9529 #endif
9530 #ifdef TARGET_NR_socketpair
9531     case TARGET_NR_socketpair:
9532         return do_socketpair(arg1, arg2, arg3, arg4);
9533 #endif
9534 #ifdef TARGET_NR_setsockopt
9535     case TARGET_NR_setsockopt:
9536         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9537 #endif
9538 #if defined(TARGET_NR_syslog)
9539     case TARGET_NR_syslog:
9540         {
9541             int len = arg3; /* syslog(type, bufp, len): the length is the third argument */
9542 
9543             switch (arg1) {
9544             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9545             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9546             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9547             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9548             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9549             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9550             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9551             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9552                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9553             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9554             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9555             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9556                 {
9557                     if (len < 0) {
9558                         return -TARGET_EINVAL;
9559                     }
9560                     if (len == 0) {
9561                         return 0;
9562                     }
9563                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9564                     if (!p) {
9565                         return -TARGET_EFAULT;
9566                     }
9567                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9568                     unlock_user(p, arg2, arg3);
9569                 }
9570                 return ret;
9571             default:
9572                 return -TARGET_EINVAL;
9573             }
9574         }
9575         break;
9576 #endif
9577     case TARGET_NR_setitimer:
9578         {
9579             struct itimerval value, ovalue, *pvalue;
9580 
9581             if (arg2) {
9582                 pvalue = &value;
9583                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9584                     || copy_from_user_timeval(&pvalue->it_value,
9585                                               arg2 + sizeof(struct target_timeval)))
9586                     return -TARGET_EFAULT;
9587             } else {
9588                 pvalue = NULL;
9589             }
9590             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9591             if (!is_error(ret) && arg3) {
9592                 if (copy_to_user_timeval(arg3,
9593                                          &ovalue.it_interval)
9594                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9595                                             &ovalue.it_value))
9596                     return -TARGET_EFAULT;
9597             }
9598         }
9599         return ret;
9600     case TARGET_NR_getitimer:
9601         {
9602             struct itimerval value;
9603 
9604             ret = get_errno(getitimer(arg1, &value));
9605             if (!is_error(ret) && arg2) {
9606                 if (copy_to_user_timeval(arg2,
9607                                          &value.it_interval)
9608                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9609                                             &value.it_value))
9610                     return -TARGET_EFAULT;
9611             }
9612         }
9613         return ret;
9614 #ifdef TARGET_NR_stat
9615     case TARGET_NR_stat:
9616         if (!(p = lock_user_string(arg1))) {
9617             return -TARGET_EFAULT;
9618         }
9619         ret = get_errno(stat(path(p), &st));
9620         unlock_user(p, arg1, 0);
9621         goto do_stat;
9622 #endif
9623 #ifdef TARGET_NR_lstat
9624     case TARGET_NR_lstat:
9625         if (!(p = lock_user_string(arg1))) {
9626             return -TARGET_EFAULT;
9627         }
9628         ret = get_errno(lstat(path(p), &st));
9629         unlock_user(p, arg1, 0);
9630         goto do_stat;
9631 #endif
9632 #ifdef TARGET_NR_fstat
9633     case TARGET_NR_fstat:
9634         {
9635             ret = get_errno(fstat(arg1, &st));
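                 /* TARGET_NR_stat and TARGET_NR_lstat jump to do_stat below
                  * with st already filled in, so the conversion to
                  * struct target_stat is shared by all three syscalls.
                  */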
9636 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9637         do_stat:
9638 #endif
9639             if (!is_error(ret)) {
9640                 struct target_stat *target_st;
9641 
9642                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9643                     return -TARGET_EFAULT;
9644                 memset(target_st, 0, sizeof(*target_st));
9645                 __put_user(st.st_dev, &target_st->st_dev);
9646                 __put_user(st.st_ino, &target_st->st_ino);
9647                 __put_user(st.st_mode, &target_st->st_mode);
9648                 __put_user(st.st_uid, &target_st->st_uid);
9649                 __put_user(st.st_gid, &target_st->st_gid);
9650                 __put_user(st.st_nlink, &target_st->st_nlink);
9651                 __put_user(st.st_rdev, &target_st->st_rdev);
9652                 __put_user(st.st_size, &target_st->st_size);
9653                 __put_user(st.st_blksize, &target_st->st_blksize);
9654                 __put_user(st.st_blocks, &target_st->st_blocks);
9655                 __put_user(st.st_atime, &target_st->target_st_atime);
9656                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9657                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9658 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9659     defined(TARGET_STAT_HAVE_NSEC)
9660                 __put_user(st.st_atim.tv_nsec,
9661                            &target_st->target_st_atime_nsec);
9662                 __put_user(st.st_mtim.tv_nsec,
9663                            &target_st->target_st_mtime_nsec);
9664                 __put_user(st.st_ctim.tv_nsec,
9665                            &target_st->target_st_ctime_nsec);
9666 #endif
9667                 unlock_user_struct(target_st, arg2, 1);
9668             }
9669         }
9670         return ret;
9671 #endif
9672     case TARGET_NR_vhangup:
9673         return get_errno(vhangup());
9674 #ifdef TARGET_NR_syscall
9675     case TARGET_NR_syscall:
9676         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9677                           arg6, arg7, arg8, 0);
9678 #endif
9679 #if defined(TARGET_NR_wait4)
9680     case TARGET_NR_wait4:
9681         {
9682             int status;
9683             abi_long status_ptr = arg2;
9684             struct rusage rusage, *rusage_ptr;
9685             abi_ulong target_rusage = arg4;
9686             abi_long rusage_err;
9687             if (target_rusage)
9688                 rusage_ptr = &rusage;
9689             else
9690                 rusage_ptr = NULL;
9691             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9692             if (!is_error(ret)) {
9693                 if (status_ptr && ret) {
9694                     status = host_to_target_waitstatus(status);
9695                     if (put_user_s32(status, status_ptr))
9696                         return -TARGET_EFAULT;
9697                 }
9698                 if (target_rusage) {
9699                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9700                     if (rusage_err) {
9701                         ret = rusage_err;
9702                     }
9703                 }
9704             }
9705         }
9706         return ret;
9707 #endif
9708 #ifdef TARGET_NR_swapoff
9709     case TARGET_NR_swapoff:
9710         if (!(p = lock_user_string(arg1)))
9711             return -TARGET_EFAULT;
9712         ret = get_errno(swapoff(p));
9713         unlock_user(p, arg1, 0);
9714         return ret;
9715 #endif
9716     case TARGET_NR_sysinfo:
9717         {
9718             struct target_sysinfo *target_value;
9719             struct sysinfo value;
9720             ret = get_errno(sysinfo(&value));
9721             if (!is_error(ret) && arg1)
9722             {
9723                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9724                     return -TARGET_EFAULT;
9725                 __put_user(value.uptime, &target_value->uptime);
9726                 __put_user(value.loads[0], &target_value->loads[0]);
9727                 __put_user(value.loads[1], &target_value->loads[1]);
9728                 __put_user(value.loads[2], &target_value->loads[2]);
9729                 __put_user(value.totalram, &target_value->totalram);
9730                 __put_user(value.freeram, &target_value->freeram);
9731                 __put_user(value.sharedram, &target_value->sharedram);
9732                 __put_user(value.bufferram, &target_value->bufferram);
9733                 __put_user(value.totalswap, &target_value->totalswap);
9734                 __put_user(value.freeswap, &target_value->freeswap);
9735                 __put_user(value.procs, &target_value->procs);
9736                 __put_user(value.totalhigh, &target_value->totalhigh);
9737                 __put_user(value.freehigh, &target_value->freehigh);
9738                 __put_user(value.mem_unit, &target_value->mem_unit);
9739                 unlock_user_struct(target_value, arg1, 1);
9740             }
9741         }
9742         return ret;
9743 #ifdef TARGET_NR_ipc
9744     case TARGET_NR_ipc:
9745         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9746 #endif
9747 #ifdef TARGET_NR_semget
9748     case TARGET_NR_semget:
9749         return get_errno(semget(arg1, arg2, arg3));
9750 #endif
9751 #ifdef TARGET_NR_semop
9752     case TARGET_NR_semop:
9753         return do_semtimedop(arg1, arg2, arg3, 0);
9754 #endif
9755 #ifdef TARGET_NR_semtimedop
9756     case TARGET_NR_semtimedop:
9757         return do_semtimedop(arg1, arg2, arg3, arg4);
9758 #endif
9759 #ifdef TARGET_NR_semctl
9760     case TARGET_NR_semctl:
9761         return do_semctl(arg1, arg2, arg3, arg4);
9762 #endif
9763 #ifdef TARGET_NR_msgctl
9764     case TARGET_NR_msgctl:
9765         return do_msgctl(arg1, arg2, arg3);
9766 #endif
9767 #ifdef TARGET_NR_msgget
9768     case TARGET_NR_msgget:
9769         return get_errno(msgget(arg1, arg2));
9770 #endif
9771 #ifdef TARGET_NR_msgrcv
9772     case TARGET_NR_msgrcv:
9773         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9774 #endif
9775 #ifdef TARGET_NR_msgsnd
9776     case TARGET_NR_msgsnd:
9777         return do_msgsnd(arg1, arg2, arg3, arg4);
9778 #endif
9779 #ifdef TARGET_NR_shmget
9780     case TARGET_NR_shmget:
9781         return get_errno(shmget(arg1, arg2, arg3));
9782 #endif
9783 #ifdef TARGET_NR_shmctl
9784     case TARGET_NR_shmctl:
9785         return do_shmctl(arg1, arg2, arg3);
9786 #endif
9787 #ifdef TARGET_NR_shmat
9788     case TARGET_NR_shmat:
9789         return do_shmat(cpu_env, arg1, arg2, arg3);
9790 #endif
9791 #ifdef TARGET_NR_shmdt
9792     case TARGET_NR_shmdt:
9793         return do_shmdt(arg1);
9794 #endif
9795     case TARGET_NR_fsync:
9796         return get_errno(fsync(arg1));
9797     case TARGET_NR_clone:
9798         /* Linux manages to have three different orderings for its
9799          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9800          * match the kernel's CONFIG_CLONE_* settings.
9801          * Microblaze is further special in that it uses a sixth
9802          * implicit argument to clone for the TLS pointer.
9803          */
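             /* Assuming do_fork(env, flags, newsp, parent_tidptr, tls,
              * child_tidptr), the guest-side orderings handled below are:
              *   default:     clone(flags, newsp, parent_tidptr, child_tidptr, tls)
              *   BACKWARDS:   clone(flags, newsp, parent_tidptr, tls, child_tidptr)
              *   BACKWARDS2:  clone(newsp, flags, parent_tidptr, child_tidptr, tls)
              *   Microblaze:  clone(flags, newsp, stack_size, parent_tidptr,
              *                      child_tidptr, tls)
              */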
9804 #if defined(TARGET_MICROBLAZE)
9805         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9806 #elif defined(TARGET_CLONE_BACKWARDS)
9807         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9808 #elif defined(TARGET_CLONE_BACKWARDS2)
9809         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9810 #else
9811         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9812 #endif
9813         return ret;
9814 #ifdef __NR_exit_group
9815         /* new thread calls */
9816     case TARGET_NR_exit_group:
9817         preexit_cleanup(cpu_env, arg1);
9818         return get_errno(exit_group(arg1));
9819 #endif
9820     case TARGET_NR_setdomainname:
9821         if (!(p = lock_user_string(arg1)))
9822             return -TARGET_EFAULT;
9823         ret = get_errno(setdomainname(p, arg2));
9824         unlock_user(p, arg1, 0);
9825         return ret;
9826     case TARGET_NR_uname:
9827         /* no need to transcode because we use the Linux syscall */
9828         {
9829             struct new_utsname * buf;
9830 
9831             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9832                 return -TARGET_EFAULT;
9833             ret = get_errno(sys_uname(buf));
9834             if (!is_error(ret)) {
9835                 /* Overwrite the native machine name with whatever is being
9836                    emulated. */
9837                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9838                           sizeof(buf->machine));
9839                 /* Allow the user to override the reported release.  */
9840                 if (qemu_uname_release && *qemu_uname_release) {
9841                     g_strlcpy(buf->release, qemu_uname_release,
9842                               sizeof(buf->release));
9843                 }
9844             }
9845             unlock_user_struct(buf, arg1, 1);
9846         }
9847         return ret;
9848 #ifdef TARGET_I386
9849     case TARGET_NR_modify_ldt:
9850         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9851 #if !defined(TARGET_X86_64)
9852     case TARGET_NR_vm86:
9853         return do_vm86(cpu_env, arg1, arg2);
9854 #endif
9855 #endif
9856 #if defined(TARGET_NR_adjtimex)
9857     case TARGET_NR_adjtimex:
9858         {
9859             struct timex host_buf;
9860 
9861             if (target_to_host_timex(&host_buf, arg1) != 0) {
9862                 return -TARGET_EFAULT;
9863             }
9864             ret = get_errno(adjtimex(&host_buf));
9865             if (!is_error(ret)) {
9866                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9867                     return -TARGET_EFAULT;
9868                 }
9869             }
9870         }
9871         return ret;
9872 #endif
9873 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9874     case TARGET_NR_clock_adjtime:
9875         {
9876             struct timex htx, *phtx = &htx;
9877 
9878             if (target_to_host_timex(phtx, arg2) != 0) {
9879                 return -TARGET_EFAULT;
9880             }
9881             ret = get_errno(clock_adjtime(arg1, phtx));
9882             if (!is_error(ret) && phtx) {
9883                 if (host_to_target_timex(arg2, phtx) != 0) {
9884                     return -TARGET_EFAULT;
9885                 }
9886             }
9887         }
9888         return ret;
9889 #endif
9890     case TARGET_NR_getpgid:
9891         return get_errno(getpgid(arg1));
9892     case TARGET_NR_fchdir:
9893         return get_errno(fchdir(arg1));
9894     case TARGET_NR_personality:
9895         return get_errno(personality(arg1));
9896 #ifdef TARGET_NR__llseek /* Not on alpha */
9897     case TARGET_NR__llseek:
9898         {
9899             int64_t res;
9900 #if !defined(__NR_llseek)
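                 /* A host without __NR_llseek has a 64-bit off_t, so combine
                  * the guest's high/low halves and use plain lseek().
                  */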
9901             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9902             if (res == -1) {
9903                 ret = get_errno(res);
9904             } else {
9905                 ret = 0;
9906             }
9907 #else
9908             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9909 #endif
9910             if ((ret == 0) && put_user_s64(res, arg4)) {
9911                 return -TARGET_EFAULT;
9912             }
9913         }
9914         return ret;
9915 #endif
9916 #ifdef TARGET_NR_getdents
9917     case TARGET_NR_getdents:
9918 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9919 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
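             /* On a 64-bit host the kernel fills struct linux_dirent with
              * 64-bit d_ino/d_off, so read into a bounce buffer first and
              * repack each record into the narrower target_dirent layout.
              */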
9920         {
9921             struct target_dirent *target_dirp;
9922             struct linux_dirent *dirp;
9923             abi_long count = arg3;
9924 
9925             dirp = g_try_malloc(count);
9926             if (!dirp) {
9927                 return -TARGET_ENOMEM;
9928             }
9929 
9930             ret = get_errno(sys_getdents(arg1, dirp, count));
9931             if (!is_error(ret)) {
9932                 struct linux_dirent *de;
9933                 struct target_dirent *tde;
9934                 int len = ret;
9935                 int reclen, treclen;
9936                 int count1, tnamelen;
9937 
9938                 count1 = 0;
9939                 de = dirp;
9940                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9941                     return -TARGET_EFAULT;
9942                 tde = target_dirp;
9943                 while (len > 0) {
9944                     reclen = de->d_reclen;
9945                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9946                     assert(tnamelen >= 0);
9947                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9948                     assert(count1 + treclen <= count);
9949                     tde->d_reclen = tswap16(treclen);
9950                     tde->d_ino = tswapal(de->d_ino);
9951                     tde->d_off = tswapal(de->d_off);
9952                     memcpy(tde->d_name, de->d_name, tnamelen);
9953                     de = (struct linux_dirent *)((char *)de + reclen);
9954                     len -= reclen;
9955                     tde = (struct target_dirent *)((char *)tde + treclen);
9956                     count1 += treclen;
9957                 }
9958                 ret = count1;
9959                 unlock_user(target_dirp, arg2, ret);
9960             }
9961             g_free(dirp);
9962         }
9963 #else
9964         {
9965             struct linux_dirent *dirp;
9966             abi_long count = arg3;
9967 
9968             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9969                 return -TARGET_EFAULT;
9970             ret = get_errno(sys_getdents(arg1, dirp, count));
9971             if (!is_error(ret)) {
9972                 struct linux_dirent *de;
9973                 int len = ret;
9974                 int reclen;
9975                 de = dirp;
9976                 while (len > 0) {
9977                     reclen = de->d_reclen;
9978                     if (reclen > len)
9979                         break;
9980                     de->d_reclen = tswap16(reclen);
9981                     tswapls(&de->d_ino);
9982                     tswapls(&de->d_off);
9983                     de = (struct linux_dirent *)((char *)de + reclen);
9984                     len -= reclen;
9985                 }
9986             }
9987             unlock_user(dirp, arg2, ret);
9988         }
9989 #endif
9990 #else
9991         /* Implement getdents in terms of getdents64 */
9992         {
9993             struct linux_dirent64 *dirp;
9994             abi_long count = arg3;
9995 
9996             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9997             if (!dirp) {
9998                 return -TARGET_EFAULT;
9999             }
10000             ret = get_errno(sys_getdents64(arg1, dirp, count));
10001             if (!is_error(ret)) {
10002                 /* Convert the dirent64 structs to target dirent.  We do this
10003                  * in-place, since we can guarantee that a target_dirent is no
10004                  * larger than a dirent64; however this means we have to be
10005                  * careful to read everything before writing in the new format.
10006                  */
10007                 struct linux_dirent64 *de;
10008                 struct target_dirent *tde;
10009                 int len = ret;
10010                 int tlen = 0;
10011 
10012                 de = dirp;
10013                 tde = (struct target_dirent *)dirp;
10014                 while (len > 0) {
10015                     int namelen, treclen;
10016                     int reclen = de->d_reclen;
10017                     uint64_t ino = de->d_ino;
10018                     int64_t off = de->d_off;
10019                     uint8_t type = de->d_type;
10020 
10021                     namelen = strlen(de->d_name);
10022                     treclen = offsetof(struct target_dirent, d_name)
10023                         + namelen + 2;
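                          /* The + 2 covers the terminating NUL plus the d_type
                           * byte stored in the last byte of the record (written
                           * below).
                           */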
10024                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10025 
10026                     memmove(tde->d_name, de->d_name, namelen + 1);
10027                     tde->d_ino = tswapal(ino);
10028                     tde->d_off = tswapal(off);
10029                     tde->d_reclen = tswap16(treclen);
10030                     /* The target_dirent type is in what was formerly a padding
10031                      * byte at the end of the structure:
10032                      */
10033                     *(((char *)tde) + treclen - 1) = type;
10034 
10035                     de = (struct linux_dirent64 *)((char *)de + reclen);
10036                     tde = (struct target_dirent *)((char *)tde + treclen);
10037                     len -= reclen;
10038                     tlen += treclen;
10039                 }
10040                 ret = tlen;
10041             }
10042             unlock_user(dirp, arg2, ret);
10043         }
10044 #endif
10045         return ret;
10046 #endif /* TARGET_NR_getdents */
10047 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10048     case TARGET_NR_getdents64:
10049         {
10050             struct linux_dirent64 *dirp;
10051             abi_long count = arg3;
10052             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10053                 return -TARGET_EFAULT;
10054             ret = get_errno(sys_getdents64(arg1, dirp, count));
10055             if (!is_error(ret)) {
10056                 struct linux_dirent64 *de;
10057                 int len = ret;
10058                 int reclen;
10059                 de = dirp;
10060                 while (len > 0) {
10061                     reclen = de->d_reclen;
10062                     if (reclen > len)
10063                         break;
10064                     de->d_reclen = tswap16(reclen);
10065                     tswap64s((uint64_t *)&de->d_ino);
10066                     tswap64s((uint64_t *)&de->d_off);
10067                     de = (struct linux_dirent64 *)((char *)de + reclen);
10068                     len -= reclen;
10069                 }
10070             }
10071             unlock_user(dirp, arg2, ret);
10072         }
10073         return ret;
10074 #endif /* TARGET_NR_getdents64 */
10075 #if defined(TARGET_NR__newselect)
10076     case TARGET_NR__newselect:
10077         return do_select(arg1, arg2, arg3, arg4, arg5);
10078 #endif
10079 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10080 # ifdef TARGET_NR_poll
10081     case TARGET_NR_poll:
10082 # endif
10083 # ifdef TARGET_NR_ppoll
10084     case TARGET_NR_ppoll:
10085 # endif
10086         {
10087             struct target_pollfd *target_pfd;
10088             unsigned int nfds = arg2;
10089             struct pollfd *pfd;
10090             unsigned int i;
10091 
10092             pfd = NULL;
10093             target_pfd = NULL;
10094             if (nfds) {
10095                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10096                     return -TARGET_EINVAL;
10097                 }
10098 
10099                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10100                                        sizeof(struct target_pollfd) * nfds, 1);
10101                 if (!target_pfd) {
10102                     return -TARGET_EFAULT;
10103                 }
10104 
10105                 pfd = alloca(sizeof(struct pollfd) * nfds);
10106                 for (i = 0; i < nfds; i++) {
10107                     pfd[i].fd = tswap32(target_pfd[i].fd);
10108                     pfd[i].events = tswap16(target_pfd[i].events);
10109                 }
10110             }
10111 
10112             switch (num) {
10113 # ifdef TARGET_NR_ppoll
10114             case TARGET_NR_ppoll:
10115             {
10116                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10117                 target_sigset_t *target_set;
10118                 sigset_t _set, *set = &_set;
10119 
10120                 if (arg3) {
10121                     if (target_to_host_timespec(timeout_ts, arg3)) {
10122                         unlock_user(target_pfd, arg1, 0);
10123                         return -TARGET_EFAULT;
10124                     }
10125                 } else {
10126                     timeout_ts = NULL;
10127                 }
10128 
10129                 if (arg4) {
10130                     if (arg5 != sizeof(target_sigset_t)) {
10131                         unlock_user(target_pfd, arg1, 0);
10132                         return -TARGET_EINVAL;
10133                     }
10134 
10135                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10136                     if (!target_set) {
10137                         unlock_user(target_pfd, arg1, 0);
10138                         return -TARGET_EFAULT;
10139                     }
10140                     target_to_host_sigset(set, target_set);
10141                 } else {
10142                     set = NULL;
10143                 }
10144 
10145                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10146                                            set, SIGSET_T_SIZE));
10147 
10148                 if (!is_error(ret) && arg3) {
10149                     host_to_target_timespec(arg3, timeout_ts);
10150                 }
10151                 if (arg4) {
10152                     unlock_user(target_set, arg4, 0);
10153                 }
10154                 break;
10155             }
10156 # endif
10157 # ifdef TARGET_NR_poll
10158             case TARGET_NR_poll:
10159             {
10160                 struct timespec ts, *pts;
10161 
10162                 if (arg3 >= 0) {
10163                     /* Convert ms to secs, ns */
10164                     ts.tv_sec = arg3 / 1000;
10165                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10166                     pts = &ts;
10167                 } else {
10168                     /* A negative poll() timeout means "infinite" */
10169                     pts = NULL;
10170                 }
10171                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10172                 break;
10173             }
10174 # endif
10175             default:
10176                 g_assert_not_reached();
10177             }
10178 
10179             if (!is_error(ret)) {
10180                 for (i = 0; i < nfds; i++) {
10181                     target_pfd[i].revents = tswap16(pfd[i].revents);
10182                 }
10183             }
10184             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10185         }
10186         return ret;
10187 #endif
10188     case TARGET_NR_flock:
10189         /* NOTE: the flock operation constants are the same on every Linux
10190            platform, so arg2 can be passed to the host unchanged */
10191         return get_errno(safe_flock(arg1, arg2));
10192     case TARGET_NR_readv:
10193         {
10194             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10195             if (vec != NULL) {
10196                 ret = get_errno(safe_readv(arg1, vec, arg3));
10197                 unlock_iovec(vec, arg2, arg3, 1);
10198             } else {
10199                 ret = -host_to_target_errno(errno);
10200             }
10201         }
10202         return ret;
10203     case TARGET_NR_writev:
10204         {
10205             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10206             if (vec != NULL) {
10207                 ret = get_errno(safe_writev(arg1, vec, arg3));
10208                 unlock_iovec(vec, arg2, arg3, 0);
10209             } else {
10210                 ret = -host_to_target_errno(errno);
10211             }
10212         }
10213         return ret;
10214 #if defined(TARGET_NR_preadv)
10215     case TARGET_NR_preadv:
10216         {
10217             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10218             if (vec != NULL) {
10219                 unsigned long low, high;
10220 
10221                 target_to_host_low_high(arg4, arg5, &low, &high);
10222                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10223                 unlock_iovec(vec, arg2, arg3, 1);
10224             } else {
10225                 ret = -host_to_target_errno(errno);
10226            }
10227         }
10228         return ret;
10229 #endif
10230 #if defined(TARGET_NR_pwritev)
10231     case TARGET_NR_pwritev:
10232         {
10233             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10234             if (vec != NULL) {
10235                 unsigned long low, high;
10236 
10237                 target_to_host_low_high(arg4, arg5, &low, &high);
10238                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10239                 unlock_iovec(vec, arg2, arg3, 0);
10240             } else {
10241                 ret = -host_to_target_errno(errno);
10242            }
10243         }
10244         return ret;
10245 #endif
10246     case TARGET_NR_getsid:
10247         return get_errno(getsid(arg1));
10248 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10249     case TARGET_NR_fdatasync:
10250         return get_errno(fdatasync(arg1));
10251 #endif
10252 #ifdef TARGET_NR__sysctl
10253     case TARGET_NR__sysctl:
10254         /* We don't implement this, but ENOTDIR is always a safe
10255            return value. */
10256         return -TARGET_ENOTDIR;
10257 #endif
10258     case TARGET_NR_sched_getaffinity:
10259         {
10260             unsigned int mask_size;
10261             unsigned long *mask;
10262 
10263             /*
10264              * sched_getaffinity needs multiples of ulong, so need to take
10265              * care of mismatches between target ulong and host ulong sizes.
10266              */
10267             if (arg2 & (sizeof(abi_ulong) - 1)) {
10268                 return -TARGET_EINVAL;
10269             }
10270             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10271 
10272             mask = alloca(mask_size);
10273             memset(mask, 0, mask_size);
10274             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10275 
10276             if (!is_error(ret)) {
10277                 if (ret > arg2) {
10278                     /* More data was returned than will fit in the caller's buffer.
10279                      * This only happens if sizeof(abi_long) < sizeof(long)
10280                      * and the caller passed us a buffer holding an odd number
10281                      * of abi_longs. If the host kernel is actually using the
10282                      * extra 4 bytes then fail EINVAL; otherwise we can just
10283                      * ignore them and only copy the interesting part.
10284                      */
10285                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10286                     if (numcpus > arg2 * 8) {
10287                         return -TARGET_EINVAL;
10288                     }
10289                     ret = arg2;
10290                 }
10291 
10292                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10293                     return -TARGET_EFAULT;
10294                 }
10295             }
10296         }
10297         return ret;
10298     case TARGET_NR_sched_setaffinity:
10299         {
10300             unsigned int mask_size;
10301             unsigned long *mask;
10302 
10303             /*
10304              * sched_setaffinity needs multiples of ulong, so need to take
10305              * care of mismatches between target ulong and host ulong sizes.
10306              */
10307             if (arg2 & (sizeof(abi_ulong) - 1)) {
10308                 return -TARGET_EINVAL;
10309             }
10310             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10311             mask = alloca(mask_size);
10312 
10313             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10314             if (ret) {
10315                 return ret;
10316             }
10317 
10318             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10319         }
10320     case TARGET_NR_getcpu:
10321         {
10322             unsigned cpu, node;
10323             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10324                                        arg2 ? &node : NULL,
10325                                        NULL));
10326             if (is_error(ret)) {
10327                 return ret;
10328             }
10329             if (arg1 && put_user_u32(cpu, arg1)) {
10330                 return -TARGET_EFAULT;
10331             }
10332             if (arg2 && put_user_u32(node, arg2)) {
10333                 return -TARGET_EFAULT;
10334             }
10335         }
10336         return ret;
10337     case TARGET_NR_sched_setparam:
10338         {
10339             struct sched_param *target_schp;
10340             struct sched_param schp;
10341 
10342             if (arg2 == 0) {
10343                 return -TARGET_EINVAL;
10344             }
10345             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10346                 return -TARGET_EFAULT;
10347             schp.sched_priority = tswap32(target_schp->sched_priority);
10348             unlock_user_struct(target_schp, arg2, 0);
10349             return get_errno(sched_setparam(arg1, &schp));
10350         }
10351     case TARGET_NR_sched_getparam:
10352         {
10353             struct sched_param *target_schp;
10354             struct sched_param schp;
10355 
10356             if (arg2 == 0) {
10357                 return -TARGET_EINVAL;
10358             }
10359             ret = get_errno(sched_getparam(arg1, &schp));
10360             if (!is_error(ret)) {
10361                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10362                     return -TARGET_EFAULT;
10363                 target_schp->sched_priority = tswap32(schp.sched_priority);
10364                 unlock_user_struct(target_schp, arg2, 1);
10365             }
10366         }
10367         return ret;
10368     case TARGET_NR_sched_setscheduler:
10369         {
10370             struct sched_param *target_schp;
10371             struct sched_param schp;
10372             if (arg3 == 0) {
10373                 return -TARGET_EINVAL;
10374             }
10375             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10376                 return -TARGET_EFAULT;
10377             schp.sched_priority = tswap32(target_schp->sched_priority);
10378             unlock_user_struct(target_schp, arg3, 0);
10379             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10380         }
10381     case TARGET_NR_sched_getscheduler:
10382         return get_errno(sched_getscheduler(arg1));
10383     case TARGET_NR_sched_yield:
10384         return get_errno(sched_yield());
10385     case TARGET_NR_sched_get_priority_max:
10386         return get_errno(sched_get_priority_max(arg1));
10387     case TARGET_NR_sched_get_priority_min:
10388         return get_errno(sched_get_priority_min(arg1));
10389 #ifdef TARGET_NR_sched_rr_get_interval
10390     case TARGET_NR_sched_rr_get_interval:
10391         {
10392             struct timespec ts;
10393             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10394             if (!is_error(ret)) {
10395                 ret = host_to_target_timespec(arg2, &ts);
10396             }
10397         }
10398         return ret;
10399 #endif
10400 #if defined(TARGET_NR_nanosleep)
10401     case TARGET_NR_nanosleep:
10402         {
10403             struct timespec req, rem;
10404             target_to_host_timespec(&req, arg1);
10405             ret = get_errno(safe_nanosleep(&req, &rem));
10406             if (is_error(ret) && arg2) {
10407                 host_to_target_timespec(arg2, &rem);
10408             }
10409         }
10410         return ret;
10411 #endif
10412     case TARGET_NR_prctl:
10413         switch (arg1) {
10414         case PR_GET_PDEATHSIG:
10415         {
10416             int deathsig;
10417             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10418             if (!is_error(ret) && arg2
10419                 && put_user_ual(deathsig, arg2)) {
10420                 return -TARGET_EFAULT;
10421             }
10422             return ret;
10423         }
10424 #ifdef PR_GET_NAME
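              /* The kernel's task name (comm) buffer is TASK_COMM_LEN == 16
               * bytes including the trailing NUL, hence the fixed size here.
               */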
10425         case PR_GET_NAME:
10426         {
10427             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10428             if (!name) {
10429                 return -TARGET_EFAULT;
10430             }
10431             ret = get_errno(prctl(arg1, (unsigned long)name,
10432                                   arg3, arg4, arg5));
10433             unlock_user(name, arg2, 16);
10434             return ret;
10435         }
10436         case PR_SET_NAME:
10437         {
10438             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10439             if (!name) {
10440                 return -TARGET_EFAULT;
10441             }
10442             ret = get_errno(prctl(arg1, (unsigned long)name,
10443                                   arg3, arg4, arg5));
10444             unlock_user(name, arg2, 0);
10445             return ret;
10446         }
10447 #endif
10448 #ifdef TARGET_MIPS
10449         case TARGET_PR_GET_FP_MODE:
10450         {
10451             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10452             ret = 0;
10453             if (env->CP0_Status & (1 << CP0St_FR)) {
10454                 ret |= TARGET_PR_FP_MODE_FR;
10455             }
10456             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10457                 ret |= TARGET_PR_FP_MODE_FRE;
10458             }
10459             return ret;
10460         }
10461         case TARGET_PR_SET_FP_MODE:
10462         {
10463             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10464             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10465             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10466             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10467             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10468 
10469             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10470                                             TARGET_PR_FP_MODE_FRE;
10471 
10472             /* If nothing to change, return right away, successfully.  */
10473             if (old_fr == new_fr && old_fre == new_fre) {
10474                 return 0;
10475             }
10476             /* Check the value is valid */
10477             if (arg2 & ~known_bits) {
10478                 return -TARGET_EOPNOTSUPP;
10479             }
10480             /* Setting FRE without FR is not supported.  */
10481             if (new_fre && !new_fr) {
10482                 return -TARGET_EOPNOTSUPP;
10483             }
10484             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10485                 /* FR1 is not supported */
10486                 return -TARGET_EOPNOTSUPP;
10487             }
10488             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10489                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10490                 /* cannot set FR=0 */
10491                 return -TARGET_EOPNOTSUPP;
10492             }
10493             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10494                 /* Cannot set FRE=1 */
10495                 return -TARGET_EOPNOTSUPP;
10496             }
10497 
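                  /* Switching FR changes how single-precision values map onto
                   * the 64-bit FPRs (with FR=0 the odd register holds the high
                   * word of the even pair), so migrate each odd word between
                   * its two possible homes.
                   */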
10498             int i;
10499             fpr_t *fpr = env->active_fpu.fpr;
10500             for (i = 0; i < 32 ; i += 2) {
10501                 if (!old_fr && new_fr) {
10502                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10503                 } else if (old_fr && !new_fr) {
10504                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10505                 }
10506             }
10507 
10508             if (new_fr) {
10509                 env->CP0_Status |= (1 << CP0St_FR);
10510                 env->hflags |= MIPS_HFLAG_F64;
10511             } else {
10512                 env->CP0_Status &= ~(1 << CP0St_FR);
10513                 env->hflags &= ~MIPS_HFLAG_F64;
10514             }
10515             if (new_fre) {
10516                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10517                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10518                     env->hflags |= MIPS_HFLAG_FRE;
10519                 }
10520             } else {
10521                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10522                 env->hflags &= ~MIPS_HFLAG_FRE;
10523             }
10524 
10525             return 0;
10526         }
10527 #endif /* MIPS */
10528 #ifdef TARGET_AARCH64
10529         case TARGET_PR_SVE_SET_VL:
10530             /*
10531              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10532              * PR_SVE_VL_INHERIT.  Note the kernel definition
10533              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10534              * even though the current architectural maximum is VQ=16.
10535              */
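                  /* arg2 is the requested vector length in bytes; on success
                   * we return the (possibly clamped) new VL in bytes, i.e.
                   * vq * 16.
                   */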
10536             ret = -TARGET_EINVAL;
10537             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10538                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10539                 CPUARMState *env = cpu_env;
10540                 ARMCPU *cpu = env_archcpu(env);
10541                 uint32_t vq, old_vq;
10542 
10543                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10544                 vq = MAX(arg2 / 16, 1);
10545                 vq = MIN(vq, cpu->sve_max_vq);
10546 
10547                 if (vq < old_vq) {
10548                     aarch64_sve_narrow_vq(env, vq);
10549                 }
10550                 env->vfp.zcr_el[1] = vq - 1;
10551                 arm_rebuild_hflags(env);
10552                 ret = vq * 16;
10553             }
10554             return ret;
10555         case TARGET_PR_SVE_GET_VL:
10556             ret = -TARGET_EINVAL;
10557             {
10558                 ARMCPU *cpu = env_archcpu(cpu_env);
10559                 if (cpu_isar_feature(aa64_sve, cpu)) {
10560                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10561                 }
10562             }
10563             return ret;
10564         case TARGET_PR_PAC_RESET_KEYS:
10565             {
10566                 CPUARMState *env = cpu_env;
10567                 ARMCPU *cpu = env_archcpu(env);
10568 
10569                 if (arg3 || arg4 || arg5) {
10570                     return -TARGET_EINVAL;
10571                 }
10572                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10573                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10574                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10575                                TARGET_PR_PAC_APGAKEY);
10576                     int ret = 0;
10577                     Error *err = NULL;
10578 
10579                     if (arg2 == 0) {
10580                         arg2 = all;
10581                     } else if (arg2 & ~all) {
10582                         return -TARGET_EINVAL;
10583                     }
10584                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10585                         ret |= qemu_guest_getrandom(&env->keys.apia,
10586                                                     sizeof(ARMPACKey), &err);
10587                     }
10588                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10589                         ret |= qemu_guest_getrandom(&env->keys.apib,
10590                                                     sizeof(ARMPACKey), &err);
10591                     }
10592                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10593                         ret |= qemu_guest_getrandom(&env->keys.apda,
10594                                                     sizeof(ARMPACKey), &err);
10595                     }
10596                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10597                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10598                                                     sizeof(ARMPACKey), &err);
10599                     }
10600                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10601                         ret |= qemu_guest_getrandom(&env->keys.apga,
10602                                                     sizeof(ARMPACKey), &err);
10603                     }
10604                     if (ret != 0) {
10605                         /*
10606                          * Some unknown failure in the crypto.  The best
10607                          * we can do is log it and fail the syscall.
10608                          * The real syscall cannot fail this way.
10609                          */
10610                         qemu_log_mask(LOG_UNIMP,
10611                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10612                                       error_get_pretty(err));
10613                         error_free(err);
10614                         return -TARGET_EIO;
10615                     }
10616                     return 0;
10617                 }
10618             }
10619             return -TARGET_EINVAL;
10620 #endif /* AARCH64 */
10621         case PR_GET_SECCOMP:
10622         case PR_SET_SECCOMP:
10623             /* Disable seccomp to prevent the target disabling syscalls we
10624              * need. */
10625             return -TARGET_EINVAL;
10626         default:
10627             /* Most prctl options have no pointer arguments */
10628             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10629         }
10630         break;
10631 #ifdef TARGET_NR_arch_prctl
10632     case TARGET_NR_arch_prctl:
10633         return do_arch_prctl(cpu_env, arg1, arg2);
10634 #endif
10635 #ifdef TARGET_NR_pread64
10636     case TARGET_NR_pread64:
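        /*
         * On 32-bit ABIs that pass 64-bit syscall arguments in aligned
         * register pairs, a padding argument shifts the offset halves up
         * by one slot; realign them before rebuilding the 64-bit offset.
         */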
10637         if (regpairs_aligned(cpu_env, num)) {
10638             arg4 = arg5;
10639             arg5 = arg6;
10640         }
10641         if (arg2 == 0 && arg3 == 0) {
10642             /* Special-case NULL buffer and zero length, which should succeed */
10643             p = 0;
10644         } else {
10645             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10646             if (!p) {
10647                 return -TARGET_EFAULT;
10648             }
10649         }
10650         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10651         unlock_user(p, arg2, ret);
10652         return ret;
10653     case TARGET_NR_pwrite64:
10654         if (regpairs_aligned(cpu_env, num)) {
10655             arg4 = arg5;
10656             arg5 = arg6;
10657         }
10658         if (arg2 == 0 && arg3 == 0) {
10659             /* Special-case NULL buffer and zero length, which should succeed */
10660             p = 0;
10661         } else {
10662             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10663             if (!p) {
10664                 return -TARGET_EFAULT;
10665             }
10666         }
10667         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10668         unlock_user(p, arg2, 0);
10669         return ret;
10670 #endif
10671     case TARGET_NR_getcwd:
10672         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10673             return -TARGET_EFAULT;
10674         ret = get_errno(sys_getcwd1(p, arg2));
10675         unlock_user(p, arg1, ret);
10676         return ret;
10677     case TARGET_NR_capget:
10678     case TARGET_NR_capset:
10679     {
10680         struct target_user_cap_header *target_header;
10681         struct target_user_cap_data *target_data = NULL;
10682         struct __user_cap_header_struct header;
10683         struct __user_cap_data_struct data[2];
10684         struct __user_cap_data_struct *dataptr = NULL;
10685         int i, target_datalen;
10686         int data_items = 1;
10687 
10688         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10689             return -TARGET_EFAULT;
10690         }
10691         header.version = tswap32(target_header->version);
10692         header.pid = tswap32(target_header->pid);
10693 
10694         if (header.version != _LINUX_CAPABILITY_VERSION) {
10695             /* Versions 2 and up take a pointer to two user_data structs */
10696             data_items = 2;
10697         }
10698 
10699         target_datalen = sizeof(*target_data) * data_items;
10700 
10701         if (arg2) {
10702             if (num == TARGET_NR_capget) {
10703                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10704             } else {
10705                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10706             }
10707             if (!target_data) {
10708                 unlock_user_struct(target_header, arg1, 0);
10709                 return -TARGET_EFAULT;
10710             }
10711 
10712             if (num == TARGET_NR_capset) {
10713                 for (i = 0; i < data_items; i++) {
10714                     data[i].effective = tswap32(target_data[i].effective);
10715                     data[i].permitted = tswap32(target_data[i].permitted);
10716                     data[i].inheritable = tswap32(target_data[i].inheritable);
10717                 }
10718             }
10719 
10720             dataptr = data;
10721         }
10722 
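        /*
         * When no data pointer was supplied, dataptr stays NULL and the
         * call only operates on the header; per capget(2) this can be used
         * to probe the kernel's preferred capability version, which is
         * reported back in header.version and copied out below.
         */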
10723         if (num == TARGET_NR_capget) {
10724             ret = get_errno(capget(&header, dataptr));
10725         } else {
10726             ret = get_errno(capset(&header, dataptr));
10727         }
10728 
10729         /* The kernel always updates version for both capget and capset */
10730         target_header->version = tswap32(header.version);
10731         unlock_user_struct(target_header, arg1, 1);
10732 
10733         if (arg2) {
10734             if (num == TARGET_NR_capget) {
10735                 for (i = 0; i < data_items; i++) {
10736                     target_data[i].effective = tswap32(data[i].effective);
10737                     target_data[i].permitted = tswap32(data[i].permitted);
10738                     target_data[i].inheritable = tswap32(data[i].inheritable);
10739                 }
10740                 unlock_user(target_data, arg2, target_datalen);
10741             } else {
10742                 unlock_user(target_data, arg2, 0);
10743             }
10744         }
10745         return ret;
10746     }
10747     case TARGET_NR_sigaltstack:
10748         return do_sigaltstack(arg1, arg2,
10749                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10750 
10751 #ifdef CONFIG_SENDFILE
10752 #ifdef TARGET_NR_sendfile
10753     case TARGET_NR_sendfile:
10754     {
10755         off_t *offp = NULL;
10756         off_t off;
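        /*
         * TARGET_NR_sendfile takes the offset as a target 'long'
         * (get/put_user_sal); TARGET_NR_sendfile64 below uses a full
         * 64-bit offset.  Both are serviced by the host sendfile().
         */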
10757         if (arg3) {
10758             ret = get_user_sal(off, arg3);
10759             if (is_error(ret)) {
10760                 return ret;
10761             }
10762             offp = &off;
10763         }
10764         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10765         if (!is_error(ret) && arg3) {
10766             abi_long ret2 = put_user_sal(off, arg3);
10767             if (is_error(ret2)) {
10768                 ret = ret2;
10769             }
10770         }
10771         return ret;
10772     }
10773 #endif
10774 #ifdef TARGET_NR_sendfile64
10775     case TARGET_NR_sendfile64:
10776     {
10777         off_t *offp = NULL;
10778         off_t off;
10779         if (arg3) {
10780             ret = get_user_s64(off, arg3);
10781             if (is_error(ret)) {
10782                 return ret;
10783             }
10784             offp = &off;
10785         }
10786         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10787         if (!is_error(ret) && arg3) {
10788             abi_long ret2 = put_user_s64(off, arg3);
10789             if (is_error(ret2)) {
10790                 ret = ret2;
10791             }
10792         }
10793         return ret;
10794     }
10795 #endif
10796 #endif
10797 #ifdef TARGET_NR_vfork
10798     case TARGET_NR_vfork:
10799         return get_errno(do_fork(cpu_env,
10800                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10801                          0, 0, 0, 0));
10802 #endif
10803 #ifdef TARGET_NR_ugetrlimit
10804     case TARGET_NR_ugetrlimit:
10805     {
10806         struct rlimit rlim;
10807         int resource = target_to_host_resource(arg1);
10808         ret = get_errno(getrlimit(resource, &rlim));
10809         if (!is_error(ret)) {
10810             struct target_rlimit *target_rlim;
10811             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10812                 return -TARGET_EFAULT;
10813             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10814             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10815             unlock_user_struct(target_rlim, arg2, 1);
10816         }
10817         return ret;
10818     }
10819 #endif
10820 #ifdef TARGET_NR_truncate64
10821     case TARGET_NR_truncate64:
10822         if (!(p = lock_user_string(arg1)))
10823             return -TARGET_EFAULT;
10824         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10825         unlock_user(p, arg1, 0);
10826         return ret;
10827 #endif
10828 #ifdef TARGET_NR_ftruncate64
10829     case TARGET_NR_ftruncate64:
10830         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10831 #endif
10832 #ifdef TARGET_NR_stat64
10833     case TARGET_NR_stat64:
10834         if (!(p = lock_user_string(arg1))) {
10835             return -TARGET_EFAULT;
10836         }
10837         ret = get_errno(stat(path(p), &st));
10838         unlock_user(p, arg1, 0);
10839         if (!is_error(ret))
10840             ret = host_to_target_stat64(cpu_env, arg2, &st);
10841         return ret;
10842 #endif
10843 #ifdef TARGET_NR_lstat64
10844     case TARGET_NR_lstat64:
10845         if (!(p = lock_user_string(arg1))) {
10846             return -TARGET_EFAULT;
10847         }
10848         ret = get_errno(lstat(path(p), &st));
10849         unlock_user(p, arg1, 0);
10850         if (!is_error(ret))
10851             ret = host_to_target_stat64(cpu_env, arg2, &st);
10852         return ret;
10853 #endif
10854 #ifdef TARGET_NR_fstat64
10855     case TARGET_NR_fstat64:
10856         ret = get_errno(fstat(arg1, &st));
10857         if (!is_error(ret))
10858             ret = host_to_target_stat64(cpu_env, arg2, &st);
10859         return ret;
10860 #endif
10861 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10862 #ifdef TARGET_NR_fstatat64
10863     case TARGET_NR_fstatat64:
10864 #endif
10865 #ifdef TARGET_NR_newfstatat
10866     case TARGET_NR_newfstatat:
10867 #endif
10868         if (!(p = lock_user_string(arg2))) {
10869             return -TARGET_EFAULT;
10870         }
10871         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10872         unlock_user(p, arg2, 0);
10873         if (!is_error(ret))
10874             ret = host_to_target_stat64(cpu_env, arg3, &st);
10875         return ret;
10876 #endif
10877 #if defined(TARGET_NR_statx)
10878     case TARGET_NR_statx:
10879         {
10880             struct target_statx *target_stx;
10881             int dirfd = arg1;
10882             int flags = arg3;
10883 
10884             p = lock_user_string(arg2);
10885             if (p == NULL) {
10886                 return -TARGET_EFAULT;
10887             }
10888 #if defined(__NR_statx)
10889             {
10890                 /*
10891                  * It is assumed that struct statx is architecture independent.
10892                  */
10893                 struct target_statx host_stx;
10894                 int mask = arg4;
10895 
10896                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10897                 if (!is_error(ret)) {
10898                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10899                         unlock_user(p, arg2, 0);
10900                         return -TARGET_EFAULT;
10901                     }
10902                 }
10903 
10904                 if (ret != -TARGET_ENOSYS) {
10905                     unlock_user(p, arg2, 0);
10906                     return ret;
10907                 }
10908             }
10909 #endif
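            /*
             * Either the host lacks statx() or it returned ENOSYS: emulate
             * the call with fstatat() and fill in only those statx fields
             * that a struct stat can provide.
             */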
10910             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10911             unlock_user(p, arg2, 0);
10912 
10913             if (!is_error(ret)) {
10914                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10915                     return -TARGET_EFAULT;
10916                 }
10917                 memset(target_stx, 0, sizeof(*target_stx));
10918                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10919                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10920                 __put_user(st.st_ino, &target_stx->stx_ino);
10921                 __put_user(st.st_mode, &target_stx->stx_mode);
10922                 __put_user(st.st_uid, &target_stx->stx_uid);
10923                 __put_user(st.st_gid, &target_stx->stx_gid);
10924                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10925                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10926                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10927                 __put_user(st.st_size, &target_stx->stx_size);
10928                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10929                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10930                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10931                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10932                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10933                 unlock_user_struct(target_stx, arg5, 1);
10934             }
10935         }
10936         return ret;
10937 #endif
10938 #ifdef TARGET_NR_lchown
10939     case TARGET_NR_lchown:
10940         if (!(p = lock_user_string(arg1)))
10941             return -TARGET_EFAULT;
10942         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10943         unlock_user(p, arg1, 0);
10944         return ret;
10945 #endif
10946 #ifdef TARGET_NR_getuid
10947     case TARGET_NR_getuid:
10948         return get_errno(high2lowuid(getuid()));
10949 #endif
10950 #ifdef TARGET_NR_getgid
10951     case TARGET_NR_getgid:
10952         return get_errno(high2lowgid(getgid()));
10953 #endif
10954 #ifdef TARGET_NR_geteuid
10955     case TARGET_NR_geteuid:
10956         return get_errno(high2lowuid(geteuid()));
10957 #endif
10958 #ifdef TARGET_NR_getegid
10959     case TARGET_NR_getegid:
10960         return get_errno(high2lowgid(getegid()));
10961 #endif
10962     case TARGET_NR_setreuid:
10963         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10964     case TARGET_NR_setregid:
10965         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10966     case TARGET_NR_getgroups:
10967         {
10968             int gidsetsize = arg1;
10969             target_id *target_grouplist;
10970             gid_t *grouplist;
10971             int i;
10972 
10973             grouplist = alloca(gidsetsize * sizeof(gid_t));
10974             ret = get_errno(getgroups(gidsetsize, grouplist));
10975             if (gidsetsize == 0)
10976                 return ret;
10977             if (!is_error(ret)) {
10978                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10979                 if (!target_grouplist)
10980                     return -TARGET_EFAULT;
10981                 for (i = 0; i < ret; i++)
10982                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10983                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10984             }
10985         }
10986         return ret;
10987     case TARGET_NR_setgroups:
10988         {
10989             int gidsetsize = arg1;
10990             target_id *target_grouplist;
10991             gid_t *grouplist = NULL;
10992             int i;
10993             if (gidsetsize) {
10994                 grouplist = alloca(gidsetsize * sizeof(gid_t));
10995                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10996                 if (!target_grouplist) {
10997                     return -TARGET_EFAULT;
10998                 }
10999                 for (i = 0; i < gidsetsize; i++) {
11000                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11001                 }
11002                 unlock_user(target_grouplist, arg2, 0);
11003             }
11004             return get_errno(setgroups(gidsetsize, grouplist));
11005         }
11006     case TARGET_NR_fchown:
11007         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11008 #if defined(TARGET_NR_fchownat)
11009     case TARGET_NR_fchownat:
11010         if (!(p = lock_user_string(arg2)))
11011             return -TARGET_EFAULT;
11012         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11013                                  low2highgid(arg4), arg5));
11014         unlock_user(p, arg2, 0);
11015         return ret;
11016 #endif
11017 #ifdef TARGET_NR_setresuid
11018     case TARGET_NR_setresuid:
11019         return get_errno(sys_setresuid(low2highuid(arg1),
11020                                        low2highuid(arg2),
11021                                        low2highuid(arg3)));
11022 #endif
11023 #ifdef TARGET_NR_getresuid
11024     case TARGET_NR_getresuid:
11025         {
11026             uid_t ruid, euid, suid;
11027             ret = get_errno(getresuid(&ruid, &euid, &suid));
11028             if (!is_error(ret)) {
11029                 if (put_user_id(high2lowuid(ruid), arg1)
11030                     || put_user_id(high2lowuid(euid), arg2)
11031                     || put_user_id(high2lowuid(suid), arg3))
11032                     return -TARGET_EFAULT;
11033             }
11034         }
11035         return ret;
11036 #endif
11037 #ifdef TARGET_NR_setresgid
11038     case TARGET_NR_setresgid:
11039         return get_errno(sys_setresgid(low2highgid(arg1),
11040                                        low2highgid(arg2),
11041                                        low2highgid(arg3)));
11042 #endif
11043 #ifdef TARGET_NR_getresgid
11044     case TARGET_NR_getresgid:
11045         {
11046             gid_t rgid, egid, sgid;
11047             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11048             if (!is_error(ret)) {
11049                 if (put_user_id(high2lowgid(rgid), arg1)
11050                     || put_user_id(high2lowgid(egid), arg2)
11051                     || put_user_id(high2lowgid(sgid), arg3))
11052                     return -TARGET_EFAULT;
11053             }
11054         }
11055         return ret;
11056 #endif
11057 #ifdef TARGET_NR_chown
11058     case TARGET_NR_chown:
11059         if (!(p = lock_user_string(arg1)))
11060             return -TARGET_EFAULT;
11061         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11062         unlock_user(p, arg1, 0);
11063         return ret;
11064 #endif
11065     case TARGET_NR_setuid:
11066         return get_errno(sys_setuid(low2highuid(arg1)));
11067     case TARGET_NR_setgid:
11068         return get_errno(sys_setgid(low2highgid(arg1)));
11069     case TARGET_NR_setfsuid:
11070         return get_errno(setfsuid(arg1));
11071     case TARGET_NR_setfsgid:
11072         return get_errno(setfsgid(arg1));
11073 
11074 #ifdef TARGET_NR_lchown32
11075     case TARGET_NR_lchown32:
11076         if (!(p = lock_user_string(arg1)))
11077             return -TARGET_EFAULT;
11078         ret = get_errno(lchown(p, arg2, arg3));
11079         unlock_user(p, arg1, 0);
11080         return ret;
11081 #endif
11082 #ifdef TARGET_NR_getuid32
11083     case TARGET_NR_getuid32:
11084         return get_errno(getuid());
11085 #endif
11086 
11087 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11088     /* Alpha specific */
11089     case TARGET_NR_getxuid:
11090         {
11091             uid_t euid;
11092             euid = geteuid();
11093             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11094         }
11095         return get_errno(getuid());
11096 #endif
11097 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11098     /* Alpha specific */
11099     case TARGET_NR_getxgid:
11100         {
11101             gid_t egid;
11102             egid = getegid();
11103             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11104         }
11105         return get_errno(getgid());
11106 #endif
11107 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11108     /* Alpha specific */
11109     case TARGET_NR_osf_getsysinfo:
11110         ret = -TARGET_EOPNOTSUPP;
11111         switch (arg1) {
11112           case TARGET_GSI_IEEE_FP_CONTROL:
11113             {
11114                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11115                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11116 
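                /*
                 * The live exception status bits are kept only in the
                 * hardware FPCR (see the SSI_IEEE_FP_CONTROL handler
                 * below), so merge them into the reported software
                 * completion word here.
                 */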
11117                 swcr &= ~SWCR_STATUS_MASK;
11118                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11119 
11120                 if (put_user_u64(swcr, arg2))
11121                     return -TARGET_EFAULT;
11122                 ret = 0;
11123             }
11124             break;
11125 
11126           /* case GSI_IEEE_STATE_AT_SIGNAL:
11127              -- Not implemented in linux kernel.
11128              case GSI_UACPROC:
11129              -- Retrieves current unaligned access state; not much used.
11130              case GSI_PROC_TYPE:
11131              -- Retrieves implver information; surely not used.
11132              case GSI_GET_HWRPB:
11133              -- Grabs a copy of the HWRPB; surely not used.
11134           */
11135         }
11136         return ret;
11137 #endif
11138 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11139     /* Alpha specific */
11140     case TARGET_NR_osf_setsysinfo:
11141         ret = -TARGET_EOPNOTSUPP;
11142         switch (arg1) {
11143           case TARGET_SSI_IEEE_FP_CONTROL:
11144             {
11145                 uint64_t swcr, fpcr;
11146 
11147                 if (get_user_u64(swcr, arg2)) {
11148                     return -TARGET_EFAULT;
11149                 }
11150 
11151                 /*
11152                  * The kernel calls swcr_update_status to update the
11153                  * status bits from the fpcr at every point that it
11154                  * could be queried.  Therefore, we store the status
11155                  * bits only in FPCR.
11156                  */
11157                 ((CPUAlphaState *)cpu_env)->swcr
11158                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11159 
11160                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11161                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11162                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11163                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11164                 ret = 0;
11165             }
11166             break;
11167 
11168           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11169             {
11170                 uint64_t exc, fpcr, fex;
11171 
11172                 if (get_user_u64(exc, arg2)) {
11173                     return -TARGET_EFAULT;
11174                 }
11175                 exc &= SWCR_STATUS_MASK;
11176                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11177 
11178                 /* Old exceptions are not signaled.  */
11179                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11180                 fex = exc & ~fex;
11181                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11182                 fex &= ((CPUArchState *)cpu_env)->swcr;
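                /*
                 * fex now holds the newly-raised exceptions for which the
                 * guest has enabled trapping; deliver them as a SIGFPE
                 * with the closest matching si_code.
                 */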
11183 
11184                 /* Update the hardware fpcr.  */
11185                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11186                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11187 
11188                 if (fex) {
11189                     int si_code = TARGET_FPE_FLTUNK;
11190                     target_siginfo_t info;
11191 
11192                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11193                         si_code = TARGET_FPE_FLTUND;
11194                     }
11195                     if (fex & SWCR_TRAP_ENABLE_INE) {
11196                         si_code = TARGET_FPE_FLTRES;
11197                     }
11198                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11199                         si_code = TARGET_FPE_FLTUND;
11200                     }
11201                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11202                         si_code = TARGET_FPE_FLTOVF;
11203                     }
11204                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11205                         si_code = TARGET_FPE_FLTDIV;
11206                     }
11207                     if (fex & SWCR_TRAP_ENABLE_INV) {
11208                         si_code = TARGET_FPE_FLTINV;
11209                     }
11210 
11211                     info.si_signo = SIGFPE;
11212                     info.si_errno = 0;
11213                     info.si_code = si_code;
11214                     info._sifields._sigfault._addr
11215                         = ((CPUArchState *)cpu_env)->pc;
11216                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11217                                  QEMU_SI_FAULT, &info);
11218                 }
11219                 ret = 0;
11220             }
11221             break;
11222 
11223           /* case SSI_NVPAIRS:
11224              -- Used with SSIN_UACPROC to enable unaligned accesses.
11225              case SSI_IEEE_STATE_AT_SIGNAL:
11226              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11227              -- Not implemented in linux kernel
11228           */
11229         }
11230         return ret;
11231 #endif
11232 #ifdef TARGET_NR_osf_sigprocmask
11233     /* Alpha specific.  */
11234     case TARGET_NR_osf_sigprocmask:
11235         {
11236             abi_ulong mask;
11237             int how;
11238             sigset_t set, oldset;
11239 
11240             switch (arg1) {
11241             case TARGET_SIG_BLOCK:
11242                 how = SIG_BLOCK;
11243                 break;
11244             case TARGET_SIG_UNBLOCK:
11245                 how = SIG_UNBLOCK;
11246                 break;
11247             case TARGET_SIG_SETMASK:
11248                 how = SIG_SETMASK;
11249                 break;
11250             default:
11251                 return -TARGET_EINVAL;
11252             }
11253             mask = arg2;
11254             target_to_host_old_sigset(&set, &mask);
11255             ret = do_sigprocmask(how, &set, &oldset);
11256             if (!ret) {
11257                 host_to_target_old_sigset(&mask, &oldset);
11258                 ret = mask;
11259             }
11260         }
11261         return ret;
11262 #endif
11263 
11264 #ifdef TARGET_NR_getgid32
11265     case TARGET_NR_getgid32:
11266         return get_errno(getgid());
11267 #endif
11268 #ifdef TARGET_NR_geteuid32
11269     case TARGET_NR_geteuid32:
11270         return get_errno(geteuid());
11271 #endif
11272 #ifdef TARGET_NR_getegid32
11273     case TARGET_NR_getegid32:
11274         return get_errno(getegid());
11275 #endif
11276 #ifdef TARGET_NR_setreuid32
11277     case TARGET_NR_setreuid32:
11278         return get_errno(setreuid(arg1, arg2));
11279 #endif
11280 #ifdef TARGET_NR_setregid32
11281     case TARGET_NR_setregid32:
11282         return get_errno(setregid(arg1, arg2));
11283 #endif
11284 #ifdef TARGET_NR_getgroups32
11285     case TARGET_NR_getgroups32:
11286         {
11287             int gidsetsize = arg1;
11288             uint32_t *target_grouplist;
11289             gid_t *grouplist;
11290             int i;
11291 
11292             grouplist = alloca(gidsetsize * sizeof(gid_t));
11293             ret = get_errno(getgroups(gidsetsize, grouplist));
11294             if (gidsetsize == 0)
11295                 return ret;
11296             if (!is_error(ret)) {
11297                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11298                 if (!target_grouplist) {
11299                     return -TARGET_EFAULT;
11300                 }
11301                 for (i = 0; i < ret; i++)
11302                     target_grouplist[i] = tswap32(grouplist[i]);
11303                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11304             }
11305         }
11306         return ret;
11307 #endif
11308 #ifdef TARGET_NR_setgroups32
11309     case TARGET_NR_setgroups32:
11310         {
11311             int gidsetsize = arg1;
11312             uint32_t *target_grouplist;
11313             gid_t *grouplist;
11314             int i;
11315 
11316             grouplist = alloca(gidsetsize * sizeof(gid_t));
11317             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11318             if (!target_grouplist) {
11319                 return -TARGET_EFAULT;
11320             }
11321             for (i = 0; i < gidsetsize; i++)
11322                 grouplist[i] = tswap32(target_grouplist[i]);
11323             unlock_user(target_grouplist, arg2, 0);
11324             return get_errno(setgroups(gidsetsize, grouplist));
11325         }
11326 #endif
11327 #ifdef TARGET_NR_fchown32
11328     case TARGET_NR_fchown32:
11329         return get_errno(fchown(arg1, arg2, arg3));
11330 #endif
11331 #ifdef TARGET_NR_setresuid32
11332     case TARGET_NR_setresuid32:
11333         return get_errno(sys_setresuid(arg1, arg2, arg3));
11334 #endif
11335 #ifdef TARGET_NR_getresuid32
11336     case TARGET_NR_getresuid32:
11337         {
11338             uid_t ruid, euid, suid;
11339             ret = get_errno(getresuid(&ruid, &euid, &suid));
11340             if (!is_error(ret)) {
11341                 if (put_user_u32(ruid, arg1)
11342                     || put_user_u32(euid, arg2)
11343                     || put_user_u32(suid, arg3))
11344                     return -TARGET_EFAULT;
11345             }
11346         }
11347         return ret;
11348 #endif
11349 #ifdef TARGET_NR_setresgid32
11350     case TARGET_NR_setresgid32:
11351         return get_errno(sys_setresgid(arg1, arg2, arg3));
11352 #endif
11353 #ifdef TARGET_NR_getresgid32
11354     case TARGET_NR_getresgid32:
11355         {
11356             gid_t rgid, egid, sgid;
11357             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11358             if (!is_error(ret)) {
11359                 if (put_user_u32(rgid, arg1)
11360                     || put_user_u32(egid, arg2)
11361                     || put_user_u32(sgid, arg3))
11362                     return -TARGET_EFAULT;
11363             }
11364         }
11365         return ret;
11366 #endif
11367 #ifdef TARGET_NR_chown32
11368     case TARGET_NR_chown32:
11369         if (!(p = lock_user_string(arg1)))
11370             return -TARGET_EFAULT;
11371         ret = get_errno(chown(p, arg2, arg3));
11372         unlock_user(p, arg1, 0);
11373         return ret;
11374 #endif
11375 #ifdef TARGET_NR_setuid32
11376     case TARGET_NR_setuid32:
11377         return get_errno(sys_setuid(arg1));
11378 #endif
11379 #ifdef TARGET_NR_setgid32
11380     case TARGET_NR_setgid32:
11381         return get_errno(sys_setgid(arg1));
11382 #endif
11383 #ifdef TARGET_NR_setfsuid32
11384     case TARGET_NR_setfsuid32:
11385         return get_errno(setfsuid(arg1));
11386 #endif
11387 #ifdef TARGET_NR_setfsgid32
11388     case TARGET_NR_setfsgid32:
11389         return get_errno(setfsgid(arg1));
11390 #endif
11391 #ifdef TARGET_NR_mincore
11392     case TARGET_NR_mincore:
11393         {
11394             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11395             if (!a) {
11396                 return -TARGET_ENOMEM;
11397             }
11398             p = lock_user_string(arg3);
11399             if (!p) {
11400                 ret = -TARGET_EFAULT;
11401             } else {
11402                 ret = get_errno(mincore(a, arg2, p));
11403                 unlock_user(p, arg3, ret);
11404             }
11405             unlock_user(a, arg1, 0);
11406         }
11407         return ret;
11408 #endif
11409 #ifdef TARGET_NR_arm_fadvise64_64
11410     case TARGET_NR_arm_fadvise64_64:
11411         /* arm_fadvise64_64 looks like fadvise64_64 but
11412          * with different argument order: fd, advice, offset, len
11413          * rather than the usual fd, offset, len, advice.
11414          * Note that offset and len are both 64-bit so appear as
11415          * pairs of 32-bit registers.
11416          */
11417         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11418                             target_offset64(arg5, arg6), arg2);
11419         return -host_to_target_errno(ret);
11420 #endif
11421 
11422 #if TARGET_ABI_BITS == 32
11423 
11424 #ifdef TARGET_NR_fadvise64_64
11425     case TARGET_NR_fadvise64_64:
11426 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11427         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11428         ret = arg2;
11429         arg2 = arg3;
11430         arg3 = arg4;
11431         arg4 = arg5;
11432         arg5 = arg6;
11433         arg6 = ret;
11434 #else
11435         /* 6 args: fd, offset (high, low), len (high, low), advice */
11436         if (regpairs_aligned(cpu_env, num)) {
11437             /* offset is in (3,4), len in (5,6) and advice in 7 */
11438             arg2 = arg3;
11439             arg3 = arg4;
11440             arg4 = arg5;
11441             arg5 = arg6;
11442             arg6 = arg7;
11443         }
11444 #endif
11445         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11446                             target_offset64(arg4, arg5), arg6);
11447         return -host_to_target_errno(ret);
11448 #endif
11449 
11450 #ifdef TARGET_NR_fadvise64
11451     case TARGET_NR_fadvise64:
11452         /* 5 args: fd, offset (high, low), len, advice */
11453         if (regpairs_aligned(cpu_env, num)) {
11454             /* offset is in (3,4), len in 5 and advice in 6 */
11455             arg2 = arg3;
11456             arg3 = arg4;
11457             arg4 = arg5;
11458             arg5 = arg6;
11459         }
11460         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11461         return -host_to_target_errno(ret);
11462 #endif
11463 
11464 #else /* not a 32-bit ABI */
11465 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11466 #ifdef TARGET_NR_fadvise64_64
11467     case TARGET_NR_fadvise64_64:
11468 #endif
11469 #ifdef TARGET_NR_fadvise64
11470     case TARGET_NR_fadvise64:
11471 #endif
11472 #ifdef TARGET_S390X
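        /*
         * The s390x guest numbers these advice values differently: its 6/7
         * correspond to POSIX_FADV_DONTNEED/NOREUSE, while its 4/5 are
         * remapped to values the host posix_fadvise() will reject.
         */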
11473         switch (arg4) {
11474         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11475         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11476         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11477         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11478         default: break;
11479         }
11480 #endif
11481         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11482 #endif
11483 #endif /* end of 64-bit ABI fadvise handling */
11484 
11485 #ifdef TARGET_NR_madvise
11486     case TARGET_NR_madvise:
11487         /* A straight passthrough may not be safe because qemu sometimes
11488            turns private file-backed mappings into anonymous mappings.
11489            This will break MADV_DONTNEED.
11490            This is a hint, so ignoring and returning success is ok.  */
11491         return 0;
11492 #endif
11493 #ifdef TARGET_NR_fcntl64
11494     case TARGET_NR_fcntl64:
11495     {
11496         int cmd;
11497         struct flock64 fl;
11498         from_flock64_fn *copyfrom = copy_from_user_flock64;
11499         to_flock64_fn *copyto = copy_to_user_flock64;
11500 
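        /*
         * The old ARM OABI does not insert the alignment padding that EABI
         * places before 64-bit members, so its struct flock64 layout
         * differs and needs dedicated copy helpers.
         */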
11501 #ifdef TARGET_ARM
11502         if (!((CPUARMState *)cpu_env)->eabi) {
11503             copyfrom = copy_from_user_oabi_flock64;
11504             copyto = copy_to_user_oabi_flock64;
11505         }
11506 #endif
11507 
11508         cmd = target_to_host_fcntl_cmd(arg2);
11509         if (cmd == -TARGET_EINVAL) {
11510             return cmd;
11511         }
11512 
11513         switch (arg2) {
11514         case TARGET_F_GETLK64:
11515             ret = copyfrom(&fl, arg3);
11516             if (ret) {
11517                 break;
11518             }
11519             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11520             if (ret == 0) {
11521                 ret = copyto(arg3, &fl);
11522             }
11523             break;
11524 
11525         case TARGET_F_SETLK64:
11526         case TARGET_F_SETLKW64:
11527             ret = copyfrom(&fl, arg3);
11528             if (ret) {
11529                 break;
11530             }
11531             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11532             break;
11533         default:
11534             ret = do_fcntl(arg1, arg2, arg3);
11535             break;
11536         }
11537         return ret;
11538     }
11539 #endif
11540 #ifdef TARGET_NR_cacheflush
11541     case TARGET_NR_cacheflush:
11542         /* self-modifying code is handled automatically, so nothing needed */
11543         return 0;
11544 #endif
11545 #ifdef TARGET_NR_getpagesize
11546     case TARGET_NR_getpagesize:
11547         return TARGET_PAGE_SIZE;
11548 #endif
11549     case TARGET_NR_gettid:
11550         return get_errno(sys_gettid());
11551 #ifdef TARGET_NR_readahead
11552     case TARGET_NR_readahead:
11553 #if TARGET_ABI_BITS == 32
11554         if (regpairs_aligned(cpu_env, num)) {
11555             arg2 = arg3;
11556             arg3 = arg4;
11557             arg4 = arg5;
11558         }
11559         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11560 #else
11561         ret = get_errno(readahead(arg1, arg2, arg3));
11562 #endif
11563         return ret;
11564 #endif
11565 #ifdef CONFIG_ATTR
11566 #ifdef TARGET_NR_setxattr
11567     case TARGET_NR_listxattr:
11568     case TARGET_NR_llistxattr:
11569     {
11570         void *p, *b = 0;
11571         if (arg2) {
11572             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11573             if (!b) {
11574                 return -TARGET_EFAULT;
11575             }
11576         }
11577         p = lock_user_string(arg1);
11578         if (p) {
11579             if (num == TARGET_NR_listxattr) {
11580                 ret = get_errno(listxattr(p, b, arg3));
11581             } else {
11582                 ret = get_errno(llistxattr(p, b, arg3));
11583             }
11584         } else {
11585             ret = -TARGET_EFAULT;
11586         }
11587         unlock_user(p, arg1, 0);
11588         unlock_user(b, arg2, arg3);
11589         return ret;
11590     }
11591     case TARGET_NR_flistxattr:
11592     {
11593         void *b = 0;
11594         if (arg2) {
11595             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11596             if (!b) {
11597                 return -TARGET_EFAULT;
11598             }
11599         }
11600         ret = get_errno(flistxattr(arg1, b, arg3));
11601         unlock_user(b, arg2, arg3);
11602         return ret;
11603     }
11604     case TARGET_NR_setxattr:
11605     case TARGET_NR_lsetxattr:
11606         {
11607             void *p, *n, *v = 0;
11608             if (arg3) {
11609                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11610                 if (!v) {
11611                     return -TARGET_EFAULT;
11612                 }
11613             }
11614             p = lock_user_string(arg1);
11615             n = lock_user_string(arg2);
11616             if (p && n) {
11617                 if (num == TARGET_NR_setxattr) {
11618                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11619                 } else {
11620                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11621                 }
11622             } else {
11623                 ret = -TARGET_EFAULT;
11624             }
11625             unlock_user(p, arg1, 0);
11626             unlock_user(n, arg2, 0);
11627             unlock_user(v, arg3, 0);
11628         }
11629         return ret;
11630     case TARGET_NR_fsetxattr:
11631         {
11632             void *n, *v = 0;
11633             if (arg3) {
11634                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11635                 if (!v) {
11636                     return -TARGET_EFAULT;
11637                 }
11638             }
11639             n = lock_user_string(arg2);
11640             if (n) {
11641                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11642             } else {
11643                 ret = -TARGET_EFAULT;
11644             }
11645             unlock_user(n, arg2, 0);
11646             unlock_user(v, arg3, 0);
11647         }
11648         return ret;
11649     case TARGET_NR_getxattr:
11650     case TARGET_NR_lgetxattr:
11651         {
11652             void *p, *n, *v = 0;
11653             if (arg3) {
11654                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11655                 if (!v) {
11656                     return -TARGET_EFAULT;
11657                 }
11658             }
11659             p = lock_user_string(arg1);
11660             n = lock_user_string(arg2);
11661             if (p && n) {
11662                 if (num == TARGET_NR_getxattr) {
11663                     ret = get_errno(getxattr(p, n, v, arg4));
11664                 } else {
11665                     ret = get_errno(lgetxattr(p, n, v, arg4));
11666                 }
11667             } else {
11668                 ret = -TARGET_EFAULT;
11669             }
11670             unlock_user(p, arg1, 0);
11671             unlock_user(n, arg2, 0);
11672             unlock_user(v, arg3, arg4);
11673         }
11674         return ret;
11675     case TARGET_NR_fgetxattr:
11676         {
11677             void *n, *v = 0;
11678             if (arg3) {
11679                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11680                 if (!v) {
11681                     return -TARGET_EFAULT;
11682                 }
11683             }
11684             n = lock_user_string(arg2);
11685             if (n) {
11686                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11687             } else {
11688                 ret = -TARGET_EFAULT;
11689             }
11690             unlock_user(n, arg2, 0);
11691             unlock_user(v, arg3, arg4);
11692         }
11693         return ret;
11694     case TARGET_NR_removexattr:
11695     case TARGET_NR_lremovexattr:
11696         {
11697             void *p, *n;
11698             p = lock_user_string(arg1);
11699             n = lock_user_string(arg2);
11700             if (p && n) {
11701                 if (num == TARGET_NR_removexattr) {
11702                     ret = get_errno(removexattr(p, n));
11703                 } else {
11704                     ret = get_errno(lremovexattr(p, n));
11705                 }
11706             } else {
11707                 ret = -TARGET_EFAULT;
11708             }
11709             unlock_user(p, arg1, 0);
11710             unlock_user(n, arg2, 0);
11711         }
11712         return ret;
11713     case TARGET_NR_fremovexattr:
11714         {
11715             void *n;
11716             n = lock_user_string(arg2);
11717             if (n) {
11718                 ret = get_errno(fremovexattr(arg1, n));
11719             } else {
11720                 ret = -TARGET_EFAULT;
11721             }
11722             unlock_user(n, arg2, 0);
11723         }
11724         return ret;
11725 #endif
11726 #endif /* CONFIG_ATTR */
11727 #ifdef TARGET_NR_set_thread_area
11728     case TARGET_NR_set_thread_area:
11729 #if defined(TARGET_MIPS)
11730       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11731       return 0;
11732 #elif defined(TARGET_CRIS)
11733       if (arg1 & 0xff)
11734           ret = -TARGET_EINVAL;
11735       else {
11736           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11737           ret = 0;
11738       }
11739       return ret;
11740 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11741       return do_set_thread_area(cpu_env, arg1);
11742 #elif defined(TARGET_M68K)
11743       {
11744           TaskState *ts = cpu->opaque;
11745           ts->tp_value = arg1;
11746           return 0;
11747       }
11748 #else
11749       return -TARGET_ENOSYS;
11750 #endif
11751 #endif
11752 #ifdef TARGET_NR_get_thread_area
11753     case TARGET_NR_get_thread_area:
11754 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11755         return do_get_thread_area(cpu_env, arg1);
11756 #elif defined(TARGET_M68K)
11757         {
11758             TaskState *ts = cpu->opaque;
11759             return ts->tp_value;
11760         }
11761 #else
11762         return -TARGET_ENOSYS;
11763 #endif
11764 #endif
11765 #ifdef TARGET_NR_getdomainname
11766     case TARGET_NR_getdomainname:
11767         return -TARGET_ENOSYS;
11768 #endif
11769 
11770 #ifdef TARGET_NR_clock_settime
11771     case TARGET_NR_clock_settime:
11772     {
11773         struct timespec ts;
11774 
11775         ret = target_to_host_timespec(&ts, arg2);
11776         if (!is_error(ret)) {
11777             ret = get_errno(clock_settime(arg1, &ts));
11778         }
11779         return ret;
11780     }
11781 #endif
11782 #ifdef TARGET_NR_clock_settime64
11783     case TARGET_NR_clock_settime64:
11784     {
11785         struct timespec ts;
11786 
11787         ret = target_to_host_timespec64(&ts, arg2);
11788         if (!is_error(ret)) {
11789             ret = get_errno(clock_settime(arg1, &ts));
11790         }
11791         return ret;
11792     }
11793 #endif
11794 #ifdef TARGET_NR_clock_gettime
11795     case TARGET_NR_clock_gettime:
11796     {
11797         struct timespec ts;
11798         ret = get_errno(clock_gettime(arg1, &ts));
11799         if (!is_error(ret)) {
11800             ret = host_to_target_timespec(arg2, &ts);
11801         }
11802         return ret;
11803     }
11804 #endif
11805 #ifdef TARGET_NR_clock_gettime64
11806     case TARGET_NR_clock_gettime64:
11807     {
11808         struct timespec ts;
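        /*
         * Same as TARGET_NR_clock_gettime above, except the result is
         * copied back to the guest in the 64-bit time_t layout.
         */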
11809         ret = get_errno(clock_gettime(arg1, &ts));
11810         if (!is_error(ret)) {
11811             ret = host_to_target_timespec64(arg2, &ts);
11812         }
11813         return ret;
11814     }
11815 #endif
11816 #ifdef TARGET_NR_clock_getres
11817     case TARGET_NR_clock_getres:
11818     {
11819         struct timespec ts;
11820         ret = get_errno(clock_getres(arg1, &ts));
11821         if (!is_error(ret)) {
11822             host_to_target_timespec(arg2, &ts);
11823         }
11824         return ret;
11825     }
11826 #endif
11827 #ifdef TARGET_NR_clock_nanosleep
11828     case TARGET_NR_clock_nanosleep:
11829     {
11830         struct timespec ts;
11831         target_to_host_timespec(&ts, arg3);
11832         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11833                                              &ts, arg4 ? &ts : NULL));
11834         if (arg4)
11835             host_to_target_timespec(arg4, &ts);
11836 
11837 #if defined(TARGET_PPC)
11838         /* clock_nanosleep is odd in that it returns positive errno values.
11839          * On PPC, CR0 bit 3 should be set in such a situation. */
11840         if (ret && ret != -TARGET_ERESTARTSYS) {
11841             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11842         }
11843 #endif
11844         return ret;
11845     }
11846 #endif
11847 
11848 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11849     case TARGET_NR_set_tid_address:
11850         return get_errno(set_tid_address((int *)g2h(arg1)));
11851 #endif
11852 
11853     case TARGET_NR_tkill:
11854         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11855 
11856     case TARGET_NR_tgkill:
11857         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11858                          target_to_host_signal(arg3)));
11859 
11860 #ifdef TARGET_NR_set_robust_list
11861     case TARGET_NR_set_robust_list:
11862     case TARGET_NR_get_robust_list:
11863         /* The ABI for supporting robust futexes has userspace pass
11864          * the kernel a pointer to a linked list which is updated by
11865          * userspace after the syscall; the list is walked by the kernel
11866          * when the thread exits. Since the linked list in QEMU guest
11867          * memory isn't a valid linked list for the host and we have
11868          * no way to reliably intercept the thread-death event, we can't
11869          * support these. Silently return ENOSYS so that guest userspace
11870          * falls back to a non-robust futex implementation (which should
11871          * be OK except in the corner case of the guest crashing while
11872          * holding a mutex that is shared with another process via
11873          * shared memory).
11874          */
11875         return -TARGET_ENOSYS;
11876 #endif
11877 
11878 #if defined(TARGET_NR_utimensat)
11879     case TARGET_NR_utimensat:
11880         {
11881             struct timespec *tsp, ts[2];
11882             if (!arg3) {
11883                 tsp = NULL;
11884             } else {
11885                 target_to_host_timespec(ts, arg3);
11886                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11887                 tsp = ts;
11888             }
11889             if (!arg2)
11890                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11891             else {
11892                 if (!(p = lock_user_string(arg2))) {
11893                     return -TARGET_EFAULT;
11894                 }
11895                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11896                 unlock_user(p, arg2, 0);
11897             }
11898         }
11899         return ret;
11900 #endif
11901 #ifdef TARGET_NR_futex
11902     case TARGET_NR_futex:
11903         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11904 #endif
11905 #ifdef TARGET_NR_futex_time64
11906     case TARGET_NR_futex_time64:
11907         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11908 #endif
11909 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11910     case TARGET_NR_inotify_init:
11911         ret = get_errno(sys_inotify_init());
11912         if (ret >= 0) {
11913             fd_trans_register(ret, &target_inotify_trans);
11914         }
11915         return ret;
11916 #endif
11917 #ifdef CONFIG_INOTIFY1
11918 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11919     case TARGET_NR_inotify_init1:
11920         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11921                                           fcntl_flags_tbl)));
11922         if (ret >= 0) {
11923             fd_trans_register(ret, &target_inotify_trans);
11924         }
11925         return ret;
11926 #endif
11927 #endif
11928 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11929     case TARGET_NR_inotify_add_watch:
11930         p = lock_user_string(arg2);
11931         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11932         unlock_user(p, arg2, 0);
11933         return ret;
11934 #endif
11935 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11936     case TARGET_NR_inotify_rm_watch:
11937         return get_errno(sys_inotify_rm_watch(arg1, arg2));
11938 #endif
11939 
11940 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11941     case TARGET_NR_mq_open:
11942         {
11943             struct mq_attr posix_mq_attr;
11944             struct mq_attr *pposix_mq_attr;
11945             int host_flags;
11946 
11947             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11948             pposix_mq_attr = NULL;
11949             if (arg4) {
11950                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11951                     return -TARGET_EFAULT;
11952                 }
11953                 pposix_mq_attr = &posix_mq_attr;
11954             }
11955             p = lock_user_string(arg1 - 1);
11956             if (!p) {
11957                 return -TARGET_EFAULT;
11958             }
11959             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11960             unlock_user(p, arg1, 0);
11961         }
11962         return ret;
11963 
11964     case TARGET_NR_mq_unlink:
11965         p = lock_user_string(arg1 - 1);
11966         if (!p) {
11967             return -TARGET_EFAULT;
11968         }
11969         ret = get_errno(mq_unlink(p));
11970         unlock_user(p, arg1, 0);
11971         return ret;
11972 
11973 #ifdef TARGET_NR_mq_timedsend
11974     case TARGET_NR_mq_timedsend:
11975         {
11976             struct timespec ts;
11977 
11978             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11979             if (arg5 != 0) {
11980                 target_to_host_timespec(&ts, arg5);
11981                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11982                 host_to_target_timespec(arg5, &ts);
11983             } else {
11984                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11985             }
11986             unlock_user(p, arg2, arg3);
11987         }
11988         return ret;
11989 #endif
11990 
11991 #ifdef TARGET_NR_mq_timedreceive
11992     case TARGET_NR_mq_timedreceive:
11993         {
11994             struct timespec ts;
11995             unsigned int prio;
11996 
11997             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11998             if (arg5 != 0) {
11999                 target_to_host_timespec(&ts, arg5);
12000                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12001                                                      &prio, &ts));
12002                 host_to_target_timespec(arg5, &ts);
12003             } else {
12004                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12005                                                      &prio, NULL));
12006             }
12007             unlock_user(p, arg2, arg3);
12008             if (arg4 != 0)
12009                 put_user_u32(prio, arg4);
12010         }
12011         return ret;
12012 #endif
12013 
12014     /* Not implemented for now... */
12015 /*     case TARGET_NR_mq_notify: */
12016 /*         break; */
12017 
12018     case TARGET_NR_mq_getsetattr:
12019         {
12020             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12021             ret = 0;
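            /*
             * mq_setattr() also reports the previous attributes, so a
             * single call covers the "set and return old" case; a plain
             * query goes through mq_getattr() instead.
             */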
12022             if (arg2 != 0) {
12023                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12024                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12025                                            &posix_mq_attr_out));
12026             } else if (arg3 != 0) {
12027                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12028             }
12029             if (ret == 0 && arg3 != 0) {
12030                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12031             }
12032         }
12033         return ret;
12034 #endif
12035 
12036 #ifdef CONFIG_SPLICE
12037 #ifdef TARGET_NR_tee
12038     case TARGET_NR_tee:
12039         {
12040             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12041         }
12042         return ret;
12043 #endif
12044 #ifdef TARGET_NR_splice
12045     case TARGET_NR_splice:
12046         {
12047             loff_t loff_in, loff_out;
12048             loff_t *ploff_in = NULL, *ploff_out = NULL;
12049             if (arg2) {
12050                 if (get_user_u64(loff_in, arg2)) {
12051                     return -TARGET_EFAULT;
12052                 }
12053                 ploff_in = &loff_in;
12054             }
12055             if (arg4) {
12056                 if (get_user_u64(loff_out, arg4)) {
12057                     return -TARGET_EFAULT;
12058                 }
12059                 ploff_out = &loff_out;
12060             }
12061             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12062             if (arg2) {
12063                 if (put_user_u64(loff_in, arg2)) {
12064                     return -TARGET_EFAULT;
12065                 }
12066             }
12067             if (arg4) {
12068                 if (put_user_u64(loff_out, arg4)) {
12069                     return -TARGET_EFAULT;
12070                 }
12071             }
12072         }
12073         return ret;
12074 #endif
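    /*
     * splice(2) treats off_in/off_out as optional in/out parameters, which
     * is why the case above copies them in from guest memory before the
     * call and writes the (possibly advanced) offsets back afterwards.
     * Hypothetical guest-side use, for illustration only (the fd names are
     * made up):
     *
     *     #define _GNU_SOURCE
     *     #include <fcntl.h>
     *
     *     loff_t off = 0;
     *     // move 4 KiB from the start of file_fd into pipe_write_fd;
     *     // the kernel advances "off" but leaves file_fd's position alone
     *     splice(file_fd, &off, pipe_write_fd, NULL, 4096, SPLICE_F_MOVE);
     */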
12075 #ifdef TARGET_NR_vmsplice
12076     case TARGET_NR_vmsplice:
12077         {
12078             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12079             if (vec != NULL) {
12080                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12081                 unlock_iovec(vec, arg2, arg3, 0);
12082             } else {
12083                 ret = -host_to_target_errno(errno);
12084             }
12085         }
12086         return ret;
12087 #endif
12088 #endif /* CONFIG_SPLICE */
12089 #ifdef CONFIG_EVENTFD
12090 #if defined(TARGET_NR_eventfd)
12091     case TARGET_NR_eventfd:
12092         ret = get_errno(eventfd(arg1, 0));
12093         if (ret >= 0) {
12094             fd_trans_register(ret, &target_eventfd_trans);
12095         }
12096         return ret;
12097 #endif
12098 #if defined(TARGET_NR_eventfd2)
12099     case TARGET_NR_eventfd2:
12100     {
12101         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12102         if (arg2 & TARGET_O_NONBLOCK) {
12103             host_flags |= O_NONBLOCK;
12104         }
12105         if (arg2 & TARGET_O_CLOEXEC) {
12106             host_flags |= O_CLOEXEC;
12107         }
12108         ret = get_errno(eventfd(arg1, host_flags));
12109         if (ret >= 0) {
12110             fd_trans_register(ret, &target_eventfd_trans);
12111         }
12112         return ret;
12113     }
12114 #endif
12115 #endif /* CONFIG_EVENTFD */
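    /*
     * Note on eventfd2 above: the guest's TARGET_O_NONBLOCK and
     * TARGET_O_CLOEXEC bits are rewritten into the host's O_* values, which
     * works because Linux defines EFD_NONBLOCK and EFD_CLOEXEC with the
     * same values as O_NONBLOCK and O_CLOEXEC.  Hypothetical guest-side
     * counter, for illustration only:
     *
     *     #include <sys/eventfd.h>
     *     #include <stdint.h>
     *     #include <unistd.h>
     *
     *     int efd = eventfd(0, EFD_CLOEXEC);   // initial count 0
     *     uint64_t one = 1, val;
     *     write(efd, &one, sizeof(one));       // add 1 to the counter
     *     read(efd, &val, sizeof(val));        // val == 1, counter resets
     *     close(efd);
     */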
12116 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12117     case TARGET_NR_fallocate:
12118 #if TARGET_ABI_BITS == 32
12119         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12120                                   target_offset64(arg5, arg6)));
12121 #else
12122         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12123 #endif
12124         return ret;
12125 #endif
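    /*
     * On 32-bit ABIs a 64-bit file offset arrives as two consecutive 32-bit
     * syscall arguments, and target_offset64() (defined earlier in this
     * file) reassembles them; which of the two words is the high half
     * depends on the guest's endianness.  Simplified sketch:
     *
     *     uint64_t offset = ((uint64_t)high_word << 32) | low_word;
     *
     * so fallocate(fd, mode, offset, len) from a 32-bit guest shows up here
     * as fallocate(arg1, arg2, pair(arg3, arg4), pair(arg5, arg6)).
     */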
12126 #if defined(CONFIG_SYNC_FILE_RANGE)
12127 #if defined(TARGET_NR_sync_file_range)
12128     case TARGET_NR_sync_file_range:
12129 #if TARGET_ABI_BITS == 32
12130 #if defined(TARGET_MIPS)
12131         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12132                                         target_offset64(arg5, arg6), arg7));
12133 #else
12134         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12135                                         target_offset64(arg4, arg5), arg6));
12136 #endif /* !TARGET_MIPS */
12137 #else
12138         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12139 #endif
12140         return ret;
12141 #endif
12142 #if defined(TARGET_NR_sync_file_range2) || \
12143     defined(TARGET_NR_arm_sync_file_range)
12144 #if defined(TARGET_NR_sync_file_range2)
12145     case TARGET_NR_sync_file_range2:
12146 #endif
12147 #if defined(TARGET_NR_arm_sync_file_range)
12148     case TARGET_NR_arm_sync_file_range:
12149 #endif
12150         /* This is like sync_file_range but the arguments are reordered */
12151 #if TARGET_ABI_BITS == 32
12152         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12153                                         target_offset64(arg5, arg6), arg2));
12154 #else
12155         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12156 #endif
12157         return ret;
12158 #endif
12159 #endif
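    /*
     * The reordering handled above: sync_file_range2() moves the flags
     * argument forward so that the two 64-bit values can occupy aligned
     * register pairs on 32-bit ABIs such as ARM.  Roughly:
     *
     *     sync_file_range(fd, offset, nbytes, flags);    // native ordering
     *     sync_file_range2(fd, flags, offset, nbytes);   // reordered variant
     */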
12160 #if defined(TARGET_NR_signalfd4)
12161     case TARGET_NR_signalfd4:
12162         return do_signalfd4(arg1, arg2, arg4);
12163 #endif
12164 #if defined(TARGET_NR_signalfd)
12165     case TARGET_NR_signalfd:
12166         return do_signalfd4(arg1, arg2, 0);
12167 #endif
12168 #if defined(CONFIG_EPOLL)
12169 #if defined(TARGET_NR_epoll_create)
12170     case TARGET_NR_epoll_create:
12171         return get_errno(epoll_create(arg1));
12172 #endif
12173 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12174     case TARGET_NR_epoll_create1:
12175         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12176 #endif
12177 #if defined(TARGET_NR_epoll_ctl)
12178     case TARGET_NR_epoll_ctl:
12179     {
12180         struct epoll_event ep;
12181         struct epoll_event *epp = NULL;
12182         if (arg4) {
12183             struct target_epoll_event *target_ep;
12184             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12185                 return -TARGET_EFAULT;
12186             }
12187             ep.events = tswap32(target_ep->events);
12188             /* The epoll_data_t union is just opaque data to the kernel,
12189              * so we transfer all 64 bits across and need not worry what
12190              * actual data type it is.
12191              */
12192             ep.data.u64 = tswap64(target_ep->data.u64);
12193             unlock_user_struct(target_ep, arg4, 0);
12194             epp = &ep;
12195         }
12196         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12197     }
12198 #endif
12199 
12200 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12201 #if defined(TARGET_NR_epoll_wait)
12202     case TARGET_NR_epoll_wait:
12203 #endif
12204 #if defined(TARGET_NR_epoll_pwait)
12205     case TARGET_NR_epoll_pwait:
12206 #endif
12207     {
12208         struct target_epoll_event *target_ep;
12209         struct epoll_event *ep;
12210         int epfd = arg1;
12211         int maxevents = arg3;
12212         int timeout = arg4;
12213 
12214         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12215             return -TARGET_EINVAL;
12216         }
12217 
12218         target_ep = lock_user(VERIFY_WRITE, arg2,
12219                               maxevents * sizeof(struct target_epoll_event), 1);
12220         if (!target_ep) {
12221             return -TARGET_EFAULT;
12222         }
12223 
12224         ep = g_try_new(struct epoll_event, maxevents);
12225         if (!ep) {
12226             unlock_user(target_ep, arg2, 0);
12227             return -TARGET_ENOMEM;
12228         }
12229 
12230         switch (num) {
12231 #if defined(TARGET_NR_epoll_pwait)
12232         case TARGET_NR_epoll_pwait:
12233         {
12234             target_sigset_t *target_set;
12235             sigset_t _set, *set = &_set;
12236 
12237             if (arg5) {
12238                 if (arg6 != sizeof(target_sigset_t)) {
12239                     ret = -TARGET_EINVAL;
12240                     break;
12241                 }
12242 
12243                 target_set = lock_user(VERIFY_READ, arg5,
12244                                        sizeof(target_sigset_t), 1);
12245                 if (!target_set) {
12246                     ret = -TARGET_EFAULT;
12247                     break;
12248                 }
12249                 target_to_host_sigset(set, target_set);
12250                 unlock_user(target_set, arg5, 0);
12251             } else {
12252                 set = NULL;
12253             }
12254 
12255             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12256                                              set, SIGSET_T_SIZE));
12257             break;
12258         }
12259 #endif
12260 #if defined(TARGET_NR_epoll_wait)
12261         case TARGET_NR_epoll_wait:
12262             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12263                                              NULL, 0));
12264             break;
12265 #endif
12266         default:
12267             ret = -TARGET_ENOSYS;
12268         }
12269         if (!is_error(ret)) {
12270             int i;
12271             for (i = 0; i < ret; i++) {
12272                 target_ep[i].events = tswap32(ep[i].events);
12273                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12274             }
12275             unlock_user(target_ep, arg2,
12276                         ret * sizeof(struct target_epoll_event));
12277         } else {
12278             unlock_user(target_ep, arg2, 0);
12279         }
12280         g_free(ep);
12281         return ret;
12282     }
12283 #endif
12284 #endif
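    /*
     * The epoll cases above byte-swap each returned epoll_event; the
     * epoll_data_t payload itself is opaque and comes back to the guest
     * exactly as it was registered.  Hypothetical guest-side loop, for
     * illustration only (sock_fd and handle_fd are made up):
     *
     *     #include <sys/epoll.h>
     *
     *     int epfd = epoll_create1(EPOLL_CLOEXEC);
     *     struct epoll_event ev = { .events = EPOLLIN, .data.fd = sock_fd };
     *     epoll_ctl(epfd, EPOLL_CTL_ADD, sock_fd, &ev);
     *
     *     struct epoll_event out[16];
     *     int n = epoll_wait(epfd, out, 16, -1);   // -1 blocks indefinitely
     *     for (int i = 0; i < n; i++) {
     *         handle_fd(out[i].data.fd);
     *     }
     */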
12285 #ifdef TARGET_NR_prlimit64
12286     case TARGET_NR_prlimit64:
12287     {
12288         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12289         struct target_rlimit64 *target_rnew, *target_rold;
12290         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12291         int resource = target_to_host_resource(arg2);
12292 
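        /*
         * As with the setrlimit case earlier in this file, new limits for
         * RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are deliberately not
         * forwarded to the host, since they would also constrain QEMU's own
         * allocations; the guest is simply told the call succeeded.
         */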
12293         if (arg3 && (resource != RLIMIT_AS &&
12294                      resource != RLIMIT_DATA &&
12295                      resource != RLIMIT_STACK)) {
12296             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12297                 return -TARGET_EFAULT;
12298             }
12299             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12300             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12301             unlock_user_struct(target_rnew, arg3, 0);
12302             rnewp = &rnew;
12303         }
12304 
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp,
                                      arg4 ? &rold : NULL));
12306         if (!is_error(ret) && arg4) {
12307             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12308                 return -TARGET_EFAULT;
12309             }
12310             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12311             target_rold->rlim_max = tswap64(rold.rlim_max);
12312             unlock_user_struct(target_rold, arg4, 1);
12313         }
12314         return ret;
12315     }
12316 #endif
12317 #ifdef TARGET_NR_gethostname
12318     case TARGET_NR_gethostname:
12319     {
12320         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12321         if (name) {
12322             ret = get_errno(gethostname(name, arg2));
12323             unlock_user(name, arg1, arg2);
12324         } else {
12325             ret = -TARGET_EFAULT;
12326         }
12327         return ret;
12328     }
12329 #endif
12330 #ifdef TARGET_NR_atomic_cmpxchg_32
12331     case TARGET_NR_atomic_cmpxchg_32:
12332     {
12333         /* Should use start_exclusive() from main.c to make this atomic. */
12334         abi_ulong mem_value;
12335         if (get_user_u32(mem_value, arg6)) {
12336             target_siginfo_t info;
12337             info.si_signo = SIGSEGV;
12338             info.si_errno = 0;
12339             info.si_code = TARGET_SEGV_MAPERR;
12340             info._sifields._sigfault._addr = arg6;
12341             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12342                          QEMU_SI_FAULT, &info);
12343             ret = 0xdeadbeef;
12344 
12345         }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
12348         return mem_value;
12349     }
12350 #endif
12351 #ifdef TARGET_NR_atomic_barrier
12352     case TARGET_NR_atomic_barrier:
12353         /* Like the kernel implementation and the QEMU ARM barrier,
12354            treat this as a no-op. */
12355         return 0;
12356 #endif
12357 
12358 #ifdef TARGET_NR_timer_create
12359     case TARGET_NR_timer_create:
12360     {
12361         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12362 
12363         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12364 
12365         int clkid = arg1;
12366         int timer_index = next_free_host_timer();
12367 
12368         if (timer_index < 0) {
12369             ret = -TARGET_EAGAIN;
12370         } else {
12371             timer_t *phtimer = g_posix_timers + timer_index;
12372 
12373             if (arg2) {
12374                 phost_sevp = &host_sevp;
12375                 ret = target_to_host_sigevent(phost_sevp, arg2);
12376                 if (ret != 0) {
12377                     return ret;
12378                 }
12379             }
12380 
12381             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12382             if (ret) {
12383                 phtimer = NULL;
12384             } else {
12385                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12386                     return -TARGET_EFAULT;
12387                 }
12388             }
12389         }
12390         return ret;
12391     }
12392 #endif
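    /*
     * Timer ids handed back to the guest above are encoded as
     * (TIMER_MAGIC | index), where index selects a slot in g_posix_timers[].
     * get_timer_id(), used by the timer_* cases below, is expected to reject
     * values without the magic bits and recover the index, roughly:
     *
     *     if ((id & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
     *         return -TARGET_EINVAL;    // not an id we handed out
     *     }
     *     index = id & 0xffff;          // slot in g_posix_timers[]
     */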
12393 
12394 #ifdef TARGET_NR_timer_settime
12395     case TARGET_NR_timer_settime:
12396     {
12397         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12398          * struct itimerspec * old_value */
12399         target_timer_t timerid = get_timer_id(arg1);
12400 
12401         if (timerid < 0) {
12402             ret = timerid;
12403         } else if (arg3 == 0) {
12404             ret = -TARGET_EINVAL;
12405         } else {
12406             timer_t htimer = g_posix_timers[timerid];
12407             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12408 
12409             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12410                 return -TARGET_EFAULT;
12411             }
12412             ret = get_errno(
12413                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12414             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12415                 return -TARGET_EFAULT;
12416             }
12417         }
12418         return ret;
12419     }
12420 #endif
12421 
12422 #ifdef TARGET_NR_timer_gettime
12423     case TARGET_NR_timer_gettime:
12424     {
12425         /* args: timer_t timerid, struct itimerspec *curr_value */
12426         target_timer_t timerid = get_timer_id(arg1);
12427 
12428         if (timerid < 0) {
12429             ret = timerid;
12430         } else if (!arg2) {
12431             ret = -TARGET_EFAULT;
12432         } else {
12433             timer_t htimer = g_posix_timers[timerid];
12434             struct itimerspec hspec;
12435             ret = get_errno(timer_gettime(htimer, &hspec));
12436 
12437             if (host_to_target_itimerspec(arg2, &hspec)) {
12438                 ret = -TARGET_EFAULT;
12439             }
12440         }
12441         return ret;
12442     }
12443 #endif
12444 
12445 #ifdef TARGET_NR_timer_getoverrun
12446     case TARGET_NR_timer_getoverrun:
12447     {
12448         /* args: timer_t timerid */
12449         target_timer_t timerid = get_timer_id(arg1);
12450 
12451         if (timerid < 0) {
12452             ret = timerid;
12453         } else {
12454             timer_t htimer = g_posix_timers[timerid];
12455             ret = get_errno(timer_getoverrun(htimer));
12456         }
12457         return ret;
12458     }
12459 #endif
12460 
12461 #ifdef TARGET_NR_timer_delete
12462     case TARGET_NR_timer_delete:
12463     {
12464         /* args: timer_t timerid */
12465         target_timer_t timerid = get_timer_id(arg1);
12466 
12467         if (timerid < 0) {
12468             ret = timerid;
12469         } else {
12470             timer_t htimer = g_posix_timers[timerid];
12471             ret = get_errno(timer_delete(htimer));
12472             g_posix_timers[timerid] = 0;
12473         }
12474         return ret;
12475     }
12476 #endif
12477 
12478 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12479     case TARGET_NR_timerfd_create:
12480         return get_errno(timerfd_create(arg1,
12481                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12482 #endif
12483 
12484 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12485     case TARGET_NR_timerfd_gettime:
12486         {
12487             struct itimerspec its_curr;
12488 
12489             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12490 
12491             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12492                 return -TARGET_EFAULT;
12493             }
12494         }
12495         return ret;
12496 #endif
12497 
12498 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12499     case TARGET_NR_timerfd_settime:
12500         {
12501             struct itimerspec its_new, its_old, *p_new;
12502 
12503             if (arg3) {
12504                 if (target_to_host_itimerspec(&its_new, arg3)) {
12505                     return -TARGET_EFAULT;
12506                 }
12507                 p_new = &its_new;
12508             } else {
12509                 p_new = NULL;
12510             }
12511 
12512             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12513 
12514             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12515                 return -TARGET_EFAULT;
12516             }
12517         }
12518         return ret;
12519 #endif
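    /*
     * Hypothetical guest-side use of the timerfd cases above, for
     * illustration only:
     *
     *     #include <sys/timerfd.h>
     *     #include <stdint.h>
     *     #include <time.h>
     *     #include <unistd.h>
     *
     *     int tfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
     *     struct itimerspec its = {
     *         .it_value    = { .tv_sec = 1 },   // first expiry in 1s
     *         .it_interval = { .tv_sec = 1 },   // then every second
     *     };
     *     timerfd_settime(tfd, 0, &its, NULL);
     *     uint64_t expirations;
     *     read(tfd, &expirations, sizeof(expirations));   // blocks until expiry
     */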
12520 
12521 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12522     case TARGET_NR_ioprio_get:
12523         return get_errno(ioprio_get(arg1, arg2));
12524 #endif
12525 
12526 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12527     case TARGET_NR_ioprio_set:
12528         return get_errno(ioprio_set(arg1, arg2, arg3));
12529 #endif
12530 
12531 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12532     case TARGET_NR_setns:
12533         return get_errno(setns(arg1, arg2));
12534 #endif
12535 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12536     case TARGET_NR_unshare:
12537         return get_errno(unshare(arg1));
12538 #endif
12539 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12540     case TARGET_NR_kcmp:
12541         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12542 #endif
12543 #ifdef TARGET_NR_swapcontext
12544     case TARGET_NR_swapcontext:
12545         /* PowerPC specific.  */
12546         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12547 #endif
12548 #ifdef TARGET_NR_memfd_create
12549     case TARGET_NR_memfd_create:
12550         p = lock_user_string(arg1);
12551         if (!p) {
12552             return -TARGET_EFAULT;
12553         }
12554         ret = get_errno(memfd_create(p, arg2));
12555         fd_trans_unregister(ret);
12556         unlock_user(p, arg1, 0);
12557         return ret;
12558 #endif
12559 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12560     case TARGET_NR_membarrier:
12561         return get_errno(membarrier(arg1, arg2));
12562 #endif
12563 
12564     default:
12565         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12566         return -TARGET_ENOSYS;
12567     }
12568     return ret;
12569 }
12570 
12571 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12572                     abi_long arg2, abi_long arg3, abi_long arg4,
12573                     abi_long arg5, abi_long arg6, abi_long arg7,
12574                     abi_long arg8)
12575 {
12576     CPUState *cpu = env_cpu(cpu_env);
12577     abi_long ret;
12578 
12579 #ifdef DEBUG_ERESTARTSYS
12580     /* Debug-only code for exercising the syscall-restart code paths
12581      * in the per-architecture cpu main loops: restart every syscall
12582      * the guest makes once before letting it through.
12583      */
12584     {
12585         static bool flag;
12586         flag = !flag;
12587         if (flag) {
12588             return -TARGET_ERESTARTSYS;
12589         }
12590     }
12591 #endif
12592 
12593     record_syscall_start(cpu, num, arg1,
12594                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12595 
12596     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12597         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
12598     }
12599 
12600     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12601                       arg5, arg6, arg7, arg8);
12602 
12603     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12604         print_syscall_ret(num, ret, arg1, arg2, arg3, arg4, arg5, arg6);
12605     }
12606 
12607     record_syscall_return(cpu, num, ret);
12608     return ret;
12609 }
12610