xref: /openbmc/qemu/linux-user/syscall.c (revision c218b4ede4f9f8bdd210233f24ab2356f0e04d49)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
118 #endif
119 #include "linux_loop.h"
120 #include "uname.h"
121 
122 #include "qemu.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
128 #include "tcg/tcg.h"
129 
130 #ifndef CLONE_IO
131 #define CLONE_IO                0x80000000      /* Clone io context */
132 #endif
133 
134 /* We can't directly call the host clone syscall, because this will
135  * badly confuse libc (breaking mutexes, for example). So we must
136  * divide clone flags into:
137  *  * flag combinations that look like pthread_create()
138  *  * flag combinations that look like fork()
139  *  * flags we can implement within QEMU itself
140  *  * flags we can't support and will return an error for
141  */
142 /* For thread creation, all these flags must be present; for
143  * fork, none must be present.
144  */
145 #define CLONE_THREAD_FLAGS                              \
146     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
147      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
148 
149 /* These flags are ignored:
150  * CLONE_DETACHED is now ignored by the kernel;
151  * CLONE_IO is just an optimisation hint to the I/O scheduler
152  */
153 #define CLONE_IGNORED_FLAGS                     \
154     (CLONE_DETACHED | CLONE_IO)
155 
156 /* Flags for fork which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_FORK_FLAGS               \
158     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
159      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
160 
161 /* Flags for thread creation which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
163     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
164      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
165 
166 #define CLONE_INVALID_FORK_FLAGS                                        \
167     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
168 
169 #define CLONE_INVALID_THREAD_FLAGS                                      \
170     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
171        CLONE_IGNORED_FLAGS))
172 
173 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
174  * have almost all been allocated. We cannot support any of
175  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
176  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
177  * The checks against the invalid thread masks above will catch these.
178  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
179  */
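/* Illustrative only: a typical guest pthread_create() issues clone() with
 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID,
 * i.e. all of CLONE_THREAD_FLAGS plus bits within
 * CLONE_OPTIONAL_THREAD_FLAGS, so it passes the CLONE_INVALID_THREAD_FLAGS
 * check, whereas a fork()-style clone sets none of CLONE_THREAD_FLAGS and
 * is checked against CLONE_INVALID_FORK_FLAGS instead.
 */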
180 
181 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
182  * once. This exercises the codepaths for restart.
183  */
184 //#define DEBUG_ERESTARTSYS
185 
186 //#include <linux/msdos_fs.h>
187 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
188 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
189 
190 #undef _syscall0
191 #undef _syscall1
192 #undef _syscall2
193 #undef _syscall3
194 #undef _syscall4
195 #undef _syscall5
196 #undef _syscall6
197 
198 #define _syscall0(type,name)		\
199 static type name (void)			\
200 {					\
201 	return syscall(__NR_##name);	\
202 }
203 
204 #define _syscall1(type,name,type1,arg1)		\
205 static type name (type1 arg1)			\
206 {						\
207 	return syscall(__NR_##name, arg1);	\
208 }
209 
210 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
211 static type name (type1 arg1,type2 arg2)		\
212 {							\
213 	return syscall(__NR_##name, arg1, arg2);	\
214 }
215 
216 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
217 static type name (type1 arg1,type2 arg2,type3 arg3)		\
218 {								\
219 	return syscall(__NR_##name, arg1, arg2, arg3);		\
220 }
221 
222 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
224 {										\
225 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
226 }
227 
228 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
229 		  type5,arg5)							\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
233 }
234 
235 
236 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
237 		  type5,arg5,type6,arg6)					\
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
239                   type6 arg6)							\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
242 }
243 
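/* As an illustration of the macros above, the declaration
 * _syscall2(int, sys_getcwd1, char *, buf, size_t, size) used later in this
 * file expands to:
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * The __NR_sys_* aliases defined below redirect each wrapper to the real
 * syscall number (here __NR_getcwd), bypassing any libc wrapper.
 */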
244 
245 #define __NR_sys_uname __NR_uname
246 #define __NR_sys_getcwd1 __NR_getcwd
247 #define __NR_sys_getdents __NR_getdents
248 #define __NR_sys_getdents64 __NR_getdents64
249 #define __NR_sys_getpriority __NR_getpriority
250 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
251 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
252 #define __NR_sys_syslog __NR_syslog
253 #if defined(__NR_futex)
254 # define __NR_sys_futex __NR_futex
255 #endif
256 #if defined(__NR_futex_time64)
257 # define __NR_sys_futex_time64 __NR_futex_time64
258 #endif
259 #define __NR_sys_inotify_init __NR_inotify_init
260 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
261 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
262 #define __NR_sys_statx __NR_statx
263 
264 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
265 #define __NR__llseek __NR_lseek
266 #endif
267 
268 /* Newer kernel ports have llseek() instead of _llseek() */
269 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
270 #define TARGET_NR__llseek TARGET_NR_llseek
271 #endif
272 
273 #define __NR_sys_gettid __NR_gettid
274 _syscall0(int, sys_gettid)
275 
276 /* For the 64-bit guest on 32-bit host case we must emulate
277  * getdents using getdents64, because otherwise the host
278  * might hand us back more dirent records than we can fit
279  * into the guest buffer after structure format conversion.
280  * Otherwise we implement the guest getdents using the host getdents if the host has it.
281  */
282 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
283 #define EMULATE_GETDENTS_WITH_GETDENTS
284 #endif
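/* Illustration of the size problem: struct linux_dirent stores d_ino and
 * d_off as unsigned long, so they are 32-bit on a 32-bit host but 64-bit in
 * a 64-bit guest's layout; after conversion each record grows, and a
 * buffer-full of host records may no longer fit in the guest buffer.
 */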
285 
286 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
287 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
288 #endif
289 #if (defined(TARGET_NR_getdents) && \
290       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
291     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
292 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
293 #endif
294 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
295 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
296           loff_t *, res, uint, wh);
297 #endif
298 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
299 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
300           siginfo_t *, uinfo)
301 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
302 #ifdef __NR_exit_group
303 _syscall1(int,exit_group,int,error_code)
304 #endif
305 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
306 _syscall1(int,set_tid_address,int *,tidptr)
307 #endif
308 #if defined(__NR_futex)
309 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
310           const struct timespec *,timeout,int *,uaddr2,int,val3)
311 #endif
312 #if defined(__NR_futex_time64)
313 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
314           const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #endif
316 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
317 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
318           unsigned long *, user_mask_ptr);
319 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
320 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
321           unsigned long *, user_mask_ptr);
322 #define __NR_sys_getcpu __NR_getcpu
323 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
324 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
325           void *, arg);
326 _syscall2(int, capget, struct __user_cap_header_struct *, header,
327           struct __user_cap_data_struct *, data);
328 _syscall2(int, capset, struct __user_cap_header_struct *, header,
329           struct __user_cap_data_struct *, data);
330 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
331 _syscall2(int, ioprio_get, int, which, int, who)
332 #endif
333 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
334 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
335 #endif
336 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
337 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
338 #endif
339 
340 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
341 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
342           unsigned long, idx1, unsigned long, idx2)
343 #endif
344 
345 /*
346  * It is assumed that struct statx is architecture independent.
347  */
348 #if defined(TARGET_NR_statx) && defined(__NR_statx)
349 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
350           unsigned int, mask, struct target_statx *, statxbuf)
351 #endif
352 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
353 _syscall2(int, membarrier, int, cmd, int, flags)
354 #endif
355 
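/* Each entry maps (target_mask, target_bits) to (host_mask, host_bits);
 * target_to_host_bitmask() and host_to_target_bitmask() walk this table to
 * translate open()/fcntl() flag words between guest and host encodings.
 * The all-zero entry terminates the table.
 */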
356 static bitmask_transtbl fcntl_flags_tbl[] = {
357   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
358   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
359   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
360   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
361   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
362   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
363   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
364   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
365   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
366   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
367   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
368   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
369   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
370 #if defined(O_DIRECT)
371   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
372 #endif
373 #if defined(O_NOATIME)
374   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
375 #endif
376 #if defined(O_CLOEXEC)
377   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
378 #endif
379 #if defined(O_PATH)
380   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
381 #endif
382 #if defined(O_TMPFILE)
383   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
384 #endif
385   /* Don't terminate the list prematurely on 64-bit host+guest.  */
386 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
387   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
388 #endif
389   { 0, 0, 0, 0 }
390 };
391 
392 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
393 
394 #ifdef TARGET_NR_utimensat
395 #if defined(__NR_utimensat)
396 #define __NR_sys_utimensat __NR_utimensat
397 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
398           const struct timespec *,tsp,int,flags)
399 #else
400 static int sys_utimensat(int dirfd, const char *pathname,
401                          const struct timespec times[2], int flags)
402 {
403     errno = ENOSYS;
404     return -1;
405 }
406 #endif
407 #endif /* TARGET_NR_utimensat */
408 
409 #ifdef TARGET_NR_renameat2
410 #if defined(__NR_renameat2)
411 #define __NR_sys_renameat2 __NR_renameat2
412 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
413           const char *, new, unsigned int, flags)
414 #else
415 static int sys_renameat2(int oldfd, const char *old,
416                          int newfd, const char *new, int flags)
417 {
418     if (flags == 0) {
419         return renameat(oldfd, old, newfd, new);
420     }
421     errno = ENOSYS;
422     return -1;
423 }
424 #endif
425 #endif /* TARGET_NR_renameat2 */
426 
427 #ifdef CONFIG_INOTIFY
428 #include <sys/inotify.h>
429 
430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
431 static int sys_inotify_init(void)
432 {
433   return (inotify_init());
434 }
435 #endif
436 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
437 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
438 {
439   return (inotify_add_watch(fd, pathname, mask));
440 }
441 #endif
442 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
443 static int sys_inotify_rm_watch(int fd, int32_t wd)
444 {
445   return (inotify_rm_watch(fd, wd));
446 }
447 #endif
448 #ifdef CONFIG_INOTIFY1
449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
450 static int sys_inotify_init1(int flags)
451 {
452   return (inotify_init1(flags));
453 }
454 #endif
455 #endif
456 #else
457 /* Userspace can usually survive at runtime without inotify */
458 #undef TARGET_NR_inotify_init
459 #undef TARGET_NR_inotify_init1
460 #undef TARGET_NR_inotify_add_watch
461 #undef TARGET_NR_inotify_rm_watch
462 #endif /* CONFIG_INOTIFY  */
463 
464 #if defined(TARGET_NR_prlimit64)
465 #ifndef __NR_prlimit64
466 # define __NR_prlimit64 -1
467 #endif
468 #define __NR_sys_prlimit64 __NR_prlimit64
469 /* The glibc rlimit structure may not be the one used by the underlying syscall */
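/* In particular, glibc's rlim_t can be only 32 bits wide on 32-bit hosts
 * built without 64-bit file offsets, whereas the prlimit64 syscall always
 * uses 64-bit fields, hence the explicit host_rlimit64 layout below.
 */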
470 struct host_rlimit64 {
471     uint64_t rlim_cur;
472     uint64_t rlim_max;
473 };
474 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
475           const struct host_rlimit64 *, new_limit,
476           struct host_rlimit64 *, old_limit)
477 #endif
478 
479 
480 #if defined(TARGET_NR_timer_create)
481 /* Maximum of 32 active POSIX timers allowed at any one time. */
482 static timer_t g_posix_timers[32] = { 0, } ;
483 
484 static inline int next_free_host_timer(void)
485 {
486     int k ;
487     /* FIXME: Does finding the next free slot require a lock? */
488     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
489         if (g_posix_timers[k] == 0) {
490             g_posix_timers[k] = (timer_t) 1;
491             return k;
492         }
493     }
494     return -1;
495 }
496 #endif
497 
498 #define ERRNO_TABLE_SIZE 1200
499 
500 /* target_to_host_errno_table[] is initialized from
501  * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
503 };
504 
505 /*
506  * This list is the union of errno values overridden in asm-<arch>/errno.h
507  * minus the errnos that are not actually generic to all archs.
508  */
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510     [EAGAIN]		= TARGET_EAGAIN,
511     [EIDRM]		= TARGET_EIDRM,
512     [ECHRNG]		= TARGET_ECHRNG,
513     [EL2NSYNC]		= TARGET_EL2NSYNC,
514     [EL3HLT]		= TARGET_EL3HLT,
515     [EL3RST]		= TARGET_EL3RST,
516     [ELNRNG]		= TARGET_ELNRNG,
517     [EUNATCH]		= TARGET_EUNATCH,
518     [ENOCSI]		= TARGET_ENOCSI,
519     [EL2HLT]		= TARGET_EL2HLT,
520     [EDEADLK]		= TARGET_EDEADLK,
521     [ENOLCK]		= TARGET_ENOLCK,
522     [EBADE]		= TARGET_EBADE,
523     [EBADR]		= TARGET_EBADR,
524     [EXFULL]		= TARGET_EXFULL,
525     [ENOANO]		= TARGET_ENOANO,
526     [EBADRQC]		= TARGET_EBADRQC,
527     [EBADSLT]		= TARGET_EBADSLT,
528     [EBFONT]		= TARGET_EBFONT,
529     [ENOSTR]		= TARGET_ENOSTR,
530     [ENODATA]		= TARGET_ENODATA,
531     [ETIME]		= TARGET_ETIME,
532     [ENOSR]		= TARGET_ENOSR,
533     [ENONET]		= TARGET_ENONET,
534     [ENOPKG]		= TARGET_ENOPKG,
535     [EREMOTE]		= TARGET_EREMOTE,
536     [ENOLINK]		= TARGET_ENOLINK,
537     [EADV]		= TARGET_EADV,
538     [ESRMNT]		= TARGET_ESRMNT,
539     [ECOMM]		= TARGET_ECOMM,
540     [EPROTO]		= TARGET_EPROTO,
541     [EDOTDOT]		= TARGET_EDOTDOT,
542     [EMULTIHOP]		= TARGET_EMULTIHOP,
543     [EBADMSG]		= TARGET_EBADMSG,
544     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
545     [EOVERFLOW]		= TARGET_EOVERFLOW,
546     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
547     [EBADFD]		= TARGET_EBADFD,
548     [EREMCHG]		= TARGET_EREMCHG,
549     [ELIBACC]		= TARGET_ELIBACC,
550     [ELIBBAD]		= TARGET_ELIBBAD,
551     [ELIBSCN]		= TARGET_ELIBSCN,
552     [ELIBMAX]		= TARGET_ELIBMAX,
553     [ELIBEXEC]		= TARGET_ELIBEXEC,
554     [EILSEQ]		= TARGET_EILSEQ,
555     [ENOSYS]		= TARGET_ENOSYS,
556     [ELOOP]		= TARGET_ELOOP,
557     [ERESTART]		= TARGET_ERESTART,
558     [ESTRPIPE]		= TARGET_ESTRPIPE,
559     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
560     [EUSERS]		= TARGET_EUSERS,
561     [ENOTSOCK]		= TARGET_ENOTSOCK,
562     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
563     [EMSGSIZE]		= TARGET_EMSGSIZE,
564     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
565     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
566     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
567     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
568     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
569     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
570     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
571     [EADDRINUSE]	= TARGET_EADDRINUSE,
572     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
573     [ENETDOWN]		= TARGET_ENETDOWN,
574     [ENETUNREACH]	= TARGET_ENETUNREACH,
575     [ENETRESET]		= TARGET_ENETRESET,
576     [ECONNABORTED]	= TARGET_ECONNABORTED,
577     [ECONNRESET]	= TARGET_ECONNRESET,
578     [ENOBUFS]		= TARGET_ENOBUFS,
579     [EISCONN]		= TARGET_EISCONN,
580     [ENOTCONN]		= TARGET_ENOTCONN,
581     [EUCLEAN]		= TARGET_EUCLEAN,
582     [ENOTNAM]		= TARGET_ENOTNAM,
583     [ENAVAIL]		= TARGET_ENAVAIL,
584     [EISNAM]		= TARGET_EISNAM,
585     [EREMOTEIO]		= TARGET_EREMOTEIO,
586     [EDQUOT]            = TARGET_EDQUOT,
587     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
588     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
589     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
590     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
591     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
592     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
593     [EALREADY]		= TARGET_EALREADY,
594     [EINPROGRESS]	= TARGET_EINPROGRESS,
595     [ESTALE]		= TARGET_ESTALE,
596     [ECANCELED]		= TARGET_ECANCELED,
597     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
598     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600     [ENOKEY]		= TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618     [ENOMSG]            = TARGET_ENOMSG,
619 #endif
620 #ifdef ERFKILL
621     [ERFKILL]           = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624     [EHWPOISON]         = TARGET_EHWPOISON,
625 #endif
626 };
627 
628 static inline int host_to_target_errno(int err)
629 {
630     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631         host_to_target_errno_table[err]) {
632         return host_to_target_errno_table[err];
633     }
634     return err;
635 }
636 
637 static inline int target_to_host_errno(int err)
638 {
639     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640         target_to_host_errno_table[err]) {
641         return target_to_host_errno_table[err];
642     }
643     return err;
644 }
645 
646 static inline abi_long get_errno(abi_long ret)
647 {
648     if (ret == -1)
649         return -host_to_target_errno(errno);
650     else
651         return ret;
652 }
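/* Typical usage in the syscall handlers below (illustrative):
 *
 *     ret = get_errno(safe_openat(dirfd, p, flags, mode));
 *     if (is_error(ret)) {
 *         ... ret already holds the negated target errno ...
 *     }
 */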
653 
654 const char *target_strerror(int err)
655 {
656     if (err == TARGET_ERESTARTSYS) {
657         return "To be restarted";
658     }
659     if (err == TARGET_QEMU_ESIGRETURN) {
660         return "Successful exit from sigreturn";
661     }
662 
663     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664         return NULL;
665     }
666     return strerror(target_to_host_errno(err));
667 }
668 
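/* The safe_syscallN() macros below generate thin wrappers around
 * safe_syscall(), which, unlike a plain syscall(), is implemented so that a
 * guest signal arriving while the host syscall would block makes the call
 * fail with TARGET_ERESTARTSYS instead of hanging, letting the caller handle
 * the signal and restart the syscall. See the safe_syscall documentation in
 * qemu.h and the per-host safe-syscall.inc.S for the exact contract.
 */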
669 #define safe_syscall0(type, name) \
670 static type safe_##name(void) \
671 { \
672     return safe_syscall(__NR_##name); \
673 }
674 
675 #define safe_syscall1(type, name, type1, arg1) \
676 static type safe_##name(type1 arg1) \
677 { \
678     return safe_syscall(__NR_##name, arg1); \
679 }
680 
681 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
682 static type safe_##name(type1 arg1, type2 arg2) \
683 { \
684     return safe_syscall(__NR_##name, arg1, arg2); \
685 }
686 
687 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
689 { \
690     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
691 }
692 
693 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
694     type4, arg4) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
696 { \
697     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
698 }
699 
700 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
701     type4, arg4, type5, arg5) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
703     type5 arg5) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
706 }
707 
708 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
709     type4, arg4, type5, arg5, type6, arg6) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711     type5 arg5, type6 arg6) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
714 }
715 
716 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
717 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
718 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
719               int, flags, mode_t, mode)
720 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
721 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
722               struct rusage *, rusage)
723 #endif
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725               int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
728     defined(TARGET_NR_pselect6)
729 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
730               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
731 #endif
732 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
733 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
734               struct timespec *, tsp, const sigset_t *, sigmask,
735               size_t, sigsetsize)
736 #endif
737 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
738               int, maxevents, int, timeout, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 #if defined(__NR_futex)
741 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
742               const struct timespec *,timeout,int *,uaddr2,int,val3)
743 #endif
744 #if defined(__NR_futex_time64)
745 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
746               const struct timespec *,timeout,int *,uaddr2,int,val3)
747 #endif
748 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
749 safe_syscall2(int, kill, pid_t, pid, int, sig)
750 safe_syscall2(int, tkill, int, tid, int, sig)
751 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
752 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
753 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
754 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
755               unsigned long, pos_l, unsigned long, pos_h)
756 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
757               unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
759               socklen_t, addrlen)
760 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
761               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
762 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
763               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
764 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
765 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
766 safe_syscall2(int, flock, int, fd, int, operation)
767 #ifdef TARGET_NR_rt_sigtimedwait
768 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
769               const struct timespec *, uts, size_t, sigsetsize)
770 #endif
771 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
772               int, flags)
773 #if defined(TARGET_NR_nanosleep)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775               struct timespec *, rem)
776 #endif
777 #ifdef TARGET_NR_clock_nanosleep
778 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
779               const struct timespec *, req, struct timespec *, rem)
780 #endif
781 #ifdef __NR_ipc
782 #ifdef __s390x__
783 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
784               void *, ptr)
785 #else
786 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
787               void *, ptr, long, fifth)
788 #endif
789 #endif
790 #ifdef __NR_msgsnd
791 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
792               int, flags)
793 #endif
794 #ifdef __NR_msgrcv
795 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
796               long, msgtype, int, flags)
797 #endif
798 #ifdef __NR_semtimedop
799 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
800               unsigned, nsops, const struct timespec *, timeout)
801 #endif
802 #ifdef TARGET_NR_mq_timedsend
803 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
804               size_t, len, unsigned, prio, const struct timespec *, timeout)
805 #endif
806 #ifdef TARGET_NR_mq_timedreceive
807 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
808               size_t, len, unsigned *, prio, const struct timespec *, timeout)
809 #endif
810 /* We do ioctl like this rather than via safe_syscall3 to preserve the
811  * "third argument might be integer or pointer or not present" behaviour of
812  * the libc function.
813  */
814 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
815 /* Similarly for fcntl. Note that callers must always:
816  *  - pass the F_GETLK64 etc. constants rather than the unsuffixed F_GETLK
817  *  - use the flock64 struct rather than the unsuffixed flock
818  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
819  */
820 #ifdef __NR_fcntl64
821 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
822 #else
823 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
824 #endif
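/* For example (illustrative), a 64-bit-safe lock query looks like:
 *
 *     struct flock64 fl64;
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * never F_GETLK with a plain struct flock.
 */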
825 
826 static inline int host_to_target_sock_type(int host_type)
827 {
828     int target_type;
829 
830     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
831     case SOCK_DGRAM:
832         target_type = TARGET_SOCK_DGRAM;
833         break;
834     case SOCK_STREAM:
835         target_type = TARGET_SOCK_STREAM;
836         break;
837     default:
838         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
839         break;
840     }
841 
842 #if defined(SOCK_CLOEXEC)
843     if (host_type & SOCK_CLOEXEC) {
844         target_type |= TARGET_SOCK_CLOEXEC;
845     }
846 #endif
847 
848 #if defined(SOCK_NONBLOCK)
849     if (host_type & SOCK_NONBLOCK) {
850         target_type |= TARGET_SOCK_NONBLOCK;
851     }
852 #endif
853 
854     return target_type;
855 }
856 
857 static abi_ulong target_brk;
858 static abi_ulong target_original_brk;
859 static abi_ulong brk_page;
860 
861 void target_set_brk(abi_ulong new_brk)
862 {
863     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
864     brk_page = HOST_PAGE_ALIGN(target_brk);
865 }
866 
867 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
868 #define DEBUGF_BRK(message, args...)
869 
870 /* do_brk() must return target values and target errnos. */
871 abi_long do_brk(abi_ulong new_brk)
872 {
873     abi_long mapped_addr;
874     abi_ulong new_alloc_size;
875 
876     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
877 
878     if (!new_brk) {
879         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
880         return target_brk;
881     }
882     if (new_brk < target_original_brk) {
883         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
884                    target_brk);
885         return target_brk;
886     }
887 
888     /* If the new brk does not exceed the highest page reserved for the
889      * target heap allocation, set it and we're almost done...  */
890     if (new_brk <= brk_page) {
891         /* Heap contents are initialized to zero, as for anonymous
892          * mapped pages.  */
893         if (new_brk > target_brk) {
894             memset(g2h(target_brk), 0, new_brk - target_brk);
895         }
896         target_brk = new_brk;
897         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
898         return target_brk;
899     }
900 
901     /* We need to allocate more memory after the brk... Note that
902      * we don't use MAP_FIXED because that will map over the top of
903      * any existing mapping (like the one with the host libc or qemu
904      * itself); instead we treat "mapped but at wrong address" as
905      * a failure and unmap again.
906      */
907     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
908     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
909                                         PROT_READ|PROT_WRITE,
910                                         MAP_ANON|MAP_PRIVATE, 0, 0));
911 
912     if (mapped_addr == brk_page) {
913         /* Heap contents are initialized to zero, as for anonymous
914          * mapped pages.  Technically the new pages are already
915          * initialized to zero since they *are* anonymous mapped
916          * pages, however we have to take care with the contents that
917          * come from the remaining part of the previous page: it may
918  * contain garbage data due to a previous heap usage (grown
919  * then shrunk).  */
920         memset(g2h(target_brk), 0, brk_page - target_brk);
921 
922         target_brk = new_brk;
923         brk_page = HOST_PAGE_ALIGN(target_brk);
924         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
925             target_brk);
926         return target_brk;
927     } else if (mapped_addr != -1) {
928         /* Mapped but at wrong address, meaning there wasn't actually
929          * enough space for this brk.
930          */
931         target_munmap(mapped_addr, new_alloc_size);
932         mapped_addr = -1;
933         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
934     } else {
936         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
937     }
938 
939 #if defined(TARGET_ALPHA)
940     /* We (partially) emulate OSF/1 on Alpha, which requires we
941        return a proper errno, not an unchanged brk value.  */
942     return -TARGET_ENOMEM;
943 #endif
944     /* For everything else, return the previous break. */
945     return target_brk;
946 }
947 
948 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
949     defined(TARGET_NR_pselect6)
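/* The guest fd_set is an array of abi_ulong words (in guest byte order),
 * with fd number k stored at bit (k % TARGET_ABI_BITS) of word
 * (k / TARGET_ABI_BITS). Since the host fd_set layout is opaque, the helpers
 * below translate bit by bit via FD_SET()/FD_ISSET().
 */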
950 static inline abi_long copy_from_user_fdset(fd_set *fds,
951                                             abi_ulong target_fds_addr,
952                                             int n)
953 {
954     int i, nw, j, k;
955     abi_ulong b, *target_fds;
956 
957     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
958     if (!(target_fds = lock_user(VERIFY_READ,
959                                  target_fds_addr,
960                                  sizeof(abi_ulong) * nw,
961                                  1)))
962         return -TARGET_EFAULT;
963 
964     FD_ZERO(fds);
965     k = 0;
966     for (i = 0; i < nw; i++) {
967         /* grab the abi_ulong */
968         __get_user(b, &target_fds[i]);
969         for (j = 0; j < TARGET_ABI_BITS; j++) {
970             /* check the bit inside the abi_ulong */
971             if ((b >> j) & 1)
972                 FD_SET(k, fds);
973             k++;
974         }
975     }
976 
977     unlock_user(target_fds, target_fds_addr, 0);
978 
979     return 0;
980 }
981 
982 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
983                                                  abi_ulong target_fds_addr,
984                                                  int n)
985 {
986     if (target_fds_addr) {
987         if (copy_from_user_fdset(fds, target_fds_addr, n))
988             return -TARGET_EFAULT;
989         *fds_ptr = fds;
990     } else {
991         *fds_ptr = NULL;
992     }
993     return 0;
994 }
995 
996 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
997                                           const fd_set *fds,
998                                           int n)
999 {
1000     int i, nw, j, k;
1001     abi_long v;
1002     abi_ulong *target_fds;
1003 
1004     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1005     if (!(target_fds = lock_user(VERIFY_WRITE,
1006                                  target_fds_addr,
1007                                  sizeof(abi_ulong) * nw,
1008                                  0)))
1009         return -TARGET_EFAULT;
1010 
1011     k = 0;
1012     for (i = 0; i < nw; i++) {
1013         v = 0;
1014         for (j = 0; j < TARGET_ABI_BITS; j++) {
1015             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1016             k++;
1017         }
1018         __put_user(v, &target_fds[i]);
1019     }
1020 
1021     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1022 
1023     return 0;
1024 }
1025 #endif
1026 
1027 #if defined(__alpha__)
1028 #define HOST_HZ 1024
1029 #else
1030 #define HOST_HZ 100
1031 #endif
1032 
1033 static inline abi_long host_to_target_clock_t(long ticks)
1034 {
1035 #if HOST_HZ == TARGET_HZ
1036     return ticks;
1037 #else
1038     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1039 #endif
1040 }
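/* Worked example: with an Alpha host (HOST_HZ == 1024) and a typical
 * TARGET_HZ of 100, 1536 host ticks scale to 1536 * 100 / 1024 = 150
 * guest ticks.
 */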
1041 
1042 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1043                                              const struct rusage *rusage)
1044 {
1045     struct target_rusage *target_rusage;
1046 
1047     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1048         return -TARGET_EFAULT;
1049     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1050     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1051     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1052     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1053     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1054     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1055     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1056     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1057     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1058     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1059     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1060     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1061     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1062     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1063     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1064     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1065     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1066     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1067     unlock_user_struct(target_rusage, target_addr, 1);
1068 
1069     return 0;
1070 }
1071 
1072 #ifdef TARGET_NR_setrlimit
1073 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1074 {
1075     abi_ulong target_rlim_swap;
1076     rlim_t result;
1077 
1078     target_rlim_swap = tswapal(target_rlim);
1079     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1080         return RLIM_INFINITY;
1081 
1082     result = target_rlim_swap;
1083     if (target_rlim_swap != (rlim_t)result)
1084         return RLIM_INFINITY;
1085 
1086     return result;
1087 }
1088 #endif
1089 
1090 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1091 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1092 {
1093     abi_ulong target_rlim_swap;
1094     abi_ulong result;
1095 
1096     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1097         target_rlim_swap = TARGET_RLIM_INFINITY;
1098     else
1099         target_rlim_swap = rlim;
1100     result = tswapal(target_rlim_swap);
1101 
1102     return result;
1103 }
1104 #endif
1105 
1106 static inline int target_to_host_resource(int code)
1107 {
1108     switch (code) {
1109     case TARGET_RLIMIT_AS:
1110         return RLIMIT_AS;
1111     case TARGET_RLIMIT_CORE:
1112         return RLIMIT_CORE;
1113     case TARGET_RLIMIT_CPU:
1114         return RLIMIT_CPU;
1115     case TARGET_RLIMIT_DATA:
1116         return RLIMIT_DATA;
1117     case TARGET_RLIMIT_FSIZE:
1118         return RLIMIT_FSIZE;
1119     case TARGET_RLIMIT_LOCKS:
1120         return RLIMIT_LOCKS;
1121     case TARGET_RLIMIT_MEMLOCK:
1122         return RLIMIT_MEMLOCK;
1123     case TARGET_RLIMIT_MSGQUEUE:
1124         return RLIMIT_MSGQUEUE;
1125     case TARGET_RLIMIT_NICE:
1126         return RLIMIT_NICE;
1127     case TARGET_RLIMIT_NOFILE:
1128         return RLIMIT_NOFILE;
1129     case TARGET_RLIMIT_NPROC:
1130         return RLIMIT_NPROC;
1131     case TARGET_RLIMIT_RSS:
1132         return RLIMIT_RSS;
1133     case TARGET_RLIMIT_RTPRIO:
1134         return RLIMIT_RTPRIO;
1135     case TARGET_RLIMIT_SIGPENDING:
1136         return RLIMIT_SIGPENDING;
1137     case TARGET_RLIMIT_STACK:
1138         return RLIMIT_STACK;
1139     default:
1140         return code;
1141     }
1142 }
1143 
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145                                               abi_ulong target_tv_addr)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1150         return -TARGET_EFAULT;
1151     }
1152 
1153     __get_user(tv->tv_sec, &target_tv->tv_sec);
1154     __get_user(tv->tv_usec, &target_tv->tv_usec);
1155 
1156     unlock_user_struct(target_tv, target_tv_addr, 0);
1157 
1158     return 0;
1159 }
1160 
1161 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1162                                             const struct timeval *tv)
1163 {
1164     struct target_timeval *target_tv;
1165 
1166     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1167         return -TARGET_EFAULT;
1168     }
1169 
1170     __put_user(tv->tv_sec, &target_tv->tv_sec);
1171     __put_user(tv->tv_usec, &target_tv->tv_usec);
1172 
1173     unlock_user_struct(target_tv, target_tv_addr, 1);
1174 
1175     return 0;
1176 }
1177 
1178 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1179                                              const struct timeval *tv)
1180 {
1181     struct target__kernel_sock_timeval *target_tv;
1182 
1183     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1184         return -TARGET_EFAULT;
1185     }
1186 
1187     __put_user(tv->tv_sec, &target_tv->tv_sec);
1188     __put_user(tv->tv_usec, &target_tv->tv_usec);
1189 
1190     unlock_user_struct(target_tv, target_tv_addr, 1);
1191 
1192     return 0;
1193 }
1194 
1195 #if defined(TARGET_NR_futex) || \
1196     defined(TARGET_NR_rt_sigtimedwait) || \
1197     defined(TARGET_NR_pselect6) || \
1198     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1199     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1200     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1201     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1202     defined(TARGET_NR_timer_settime) || \
1203     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1204 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1205                                                abi_ulong target_addr)
1206 {
1207     struct target_timespec *target_ts;
1208 
1209     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1210         return -TARGET_EFAULT;
1211     }
1212     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1213     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1214     unlock_user_struct(target_ts, target_addr, 0);
1215     return 0;
1216 }
1217 #endif
1218 
1219 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1220     defined(TARGET_NR_timer_settime64) || \
1221     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
1222 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1223                                                  abi_ulong target_addr)
1224 {
1225     struct target__kernel_timespec *target_ts;
1226 
1227     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1228         return -TARGET_EFAULT;
1229     }
1230     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1231     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1232     unlock_user_struct(target_ts, target_addr, 0);
1233     return 0;
1234 }
1235 #endif
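/* Unlike target_timespec, whose tv_sec is the target's native abi_long,
 * target__kernel_timespec follows the *_time64 syscall ABI and always
 * carries a 64-bit tv_sec, so 32-bit guests can express post-2038 times.
 */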
1236 
1237 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1238                                                struct timespec *host_ts)
1239 {
1240     struct target_timespec *target_ts;
1241 
1242     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1243         return -TARGET_EFAULT;
1244     }
1245     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1246     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247     unlock_user_struct(target_ts, target_addr, 1);
1248     return 0;
1249 }
1250 
1251 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1252                                                  struct timespec *host_ts)
1253 {
1254     struct target__kernel_timespec *target_ts;
1255 
1256     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1257         return -TARGET_EFAULT;
1258     }
1259     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1260     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1261     unlock_user_struct(target_ts, target_addr, 1);
1262     return 0;
1263 }
1264 
1265 #if defined(TARGET_NR_gettimeofday)
1266 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1267                                              struct timezone *tz)
1268 {
1269     struct target_timezone *target_tz;
1270 
1271     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1272         return -TARGET_EFAULT;
1273     }
1274 
1275     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1276     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1277 
1278     unlock_user_struct(target_tz, target_tz_addr, 1);
1279 
1280     return 0;
1281 }
1282 #endif
1283 
1284 #if defined(TARGET_NR_settimeofday)
1285 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1286                                                abi_ulong target_tz_addr)
1287 {
1288     struct target_timezone *target_tz;
1289 
1290     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1291         return -TARGET_EFAULT;
1292     }
1293 
1294     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1295     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1296 
1297     unlock_user_struct(target_tz, target_tz_addr, 0);
1298 
1299     return 0;
1300 }
1301 #endif
1302 
1303 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1304 #include <mqueue.h>
1305 
1306 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1307                                               abi_ulong target_mq_attr_addr)
1308 {
1309     struct target_mq_attr *target_mq_attr;
1310 
1311     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1312                           target_mq_attr_addr, 1))
1313         return -TARGET_EFAULT;
1314 
1315     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1316     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1317     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1318     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1319 
1320     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1321 
1322     return 0;
1323 }
1324 
1325 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1326                                             const struct mq_attr *attr)
1327 {
1328     struct target_mq_attr *target_mq_attr;
1329 
1330     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1331                           target_mq_attr_addr, 0))
1332         return -TARGET_EFAULT;
1333 
1334     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1335     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1336     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1337     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1338 
1339     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1340 
1341     return 0;
1342 }
1343 #endif
1344 
1345 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1346 /* do_select() must return target values and target errnos. */
1347 static abi_long do_select(int n,
1348                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1349                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1350 {
1351     fd_set rfds, wfds, efds;
1352     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1353     struct timeval tv;
1354     struct timespec ts, *ts_ptr;
1355     abi_long ret;
1356 
1357     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1358     if (ret) {
1359         return ret;
1360     }
1361     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1362     if (ret) {
1363         return ret;
1364     }
1365     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1366     if (ret) {
1367         return ret;
1368     }
1369 
1370     if (target_tv_addr) {
1371         if (copy_from_user_timeval(&tv, target_tv_addr))
1372             return -TARGET_EFAULT;
1373         ts.tv_sec = tv.tv_sec;
1374         ts.tv_nsec = tv.tv_usec * 1000;
1375         ts_ptr = &ts;
1376     } else {
1377         ts_ptr = NULL;
1378     }
1379 
1380     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1381                                   ts_ptr, NULL));
1382 
1383     if (!is_error(ret)) {
1384         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1385             return -TARGET_EFAULT;
1386         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1387             return -TARGET_EFAULT;
1388         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1389             return -TARGET_EFAULT;
1390 
1391         if (target_tv_addr) {
1392             tv.tv_sec = ts.tv_sec;
1393             tv.tv_usec = ts.tv_nsec / 1000;
1394             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1395                 return -TARGET_EFAULT;
1396             }
1397         }
1398     }
1399 
1400     return ret;
1401 }
1402 
1403 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1404 static abi_long do_old_select(abi_ulong arg1)
1405 {
1406     struct target_sel_arg_struct *sel;
1407     abi_ulong inp, outp, exp, tvp;
1408     long nsel;
1409 
1410     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1411         return -TARGET_EFAULT;
1412     }
1413 
1414     nsel = tswapal(sel->n);
1415     inp = tswapal(sel->inp);
1416     outp = tswapal(sel->outp);
1417     exp = tswapal(sel->exp);
1418     tvp = tswapal(sel->tvp);
1419 
1420     unlock_user_struct(sel, arg1, 0);
1421 
1422     return do_select(nsel, inp, outp, exp, tvp);
1423 }
1424 #endif
1425 #endif
1426 
1427 static abi_long do_pipe2(int host_pipe[], int flags)
1428 {
1429 #ifdef CONFIG_PIPE2
1430     return pipe2(host_pipe, flags);
1431 #else
1432     return -ENOSYS;
1433 #endif
1434 }
1435 
1436 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1437                         int flags, int is_pipe2)
1438 {
1439     int host_pipe[2];
1440     abi_long ret;
1441     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1442 
1443     if (is_error(ret))
1444         return get_errno(ret);
1445 
1446     /* Several targets have special calling conventions for the original
1447        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1448     if (!is_pipe2) {
1449 #if defined(TARGET_ALPHA)
1450         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1451         return host_pipe[0];
1452 #elif defined(TARGET_MIPS)
1453         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1454         return host_pipe[0];
1455 #elif defined(TARGET_SH4)
1456         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1457         return host_pipe[0];
1458 #elif defined(TARGET_SPARC)
1459         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1460         return host_pipe[0];
1461 #endif
1462     }
1463 
1464     if (put_user_s32(host_pipe[0], pipedes)
1465         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1466         return -TARGET_EFAULT;
1467     return get_errno(ret);
1468 }
1469 
1470 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1471                                               abi_ulong target_addr,
1472                                               socklen_t len)
1473 {
1474     struct target_ip_mreqn *target_smreqn;
1475 
1476     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1477     if (!target_smreqn)
1478         return -TARGET_EFAULT;
1479     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1480     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1481     if (len == sizeof(struct target_ip_mreqn))
1482         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1483     unlock_user(target_smreqn, target_addr, 0);
1484 
1485     return 0;
1486 }
1487 
1488 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1489                                                abi_ulong target_addr,
1490                                                socklen_t len)
1491 {
1492     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1493     sa_family_t sa_family;
1494     struct target_sockaddr *target_saddr;
1495 
1496     if (fd_trans_target_to_host_addr(fd)) {
1497         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1498     }
1499 
1500     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1501     if (!target_saddr)
1502         return -TARGET_EFAULT;
1503 
1504     sa_family = tswap16(target_saddr->sa_family);
1505 
1506      /* Oops. The caller might send an incomplete sun_path; sun_path
1507      * must be terminated by \0 (see the manual page), but
1508      * unfortunately it is quite common to specify sockaddr_un
1509      * length as "strlen(x->sun_path)" while it should be
1510      * "strlen(...) + 1". We'll fix that here if needed.
1511      * Linux kernel has a similar feature.
1512      * The Linux kernel has a similar feature.
1513 
1514     if (sa_family == AF_UNIX) {
1515         if (len < unix_maxlen && len > 0) {
1516             char *cp = (char*)target_saddr;
1517 
1518             if ( cp[len-1] && !cp[len] )
1519                 len++;
1520         }
1521         if (len > unix_maxlen)
1522             len = unix_maxlen;
1523     }
1524 
1525     memcpy(addr, target_saddr, len);
1526     addr->sa_family = sa_family;
1527     if (sa_family == AF_NETLINK) {
1528         struct sockaddr_nl *nladdr;
1529 
1530         nladdr = (struct sockaddr_nl *)addr;
1531         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1532         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1533     } else if (sa_family == AF_PACKET) {
1534         struct target_sockaddr_ll *lladdr;
1535 
1536         lladdr = (struct target_sockaddr_ll *)addr;
1537         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1538         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1539     }
1540     unlock_user(target_saddr, target_addr, 0);
1541 
1542     return 0;
1543 }
1544 
1545 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1546                                                struct sockaddr *addr,
1547                                                socklen_t len)
1548 {
1549     struct target_sockaddr *target_saddr;
1550 
1551     if (len == 0) {
1552         return 0;
1553     }
1554     assert(addr);
1555 
1556     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1557     if (!target_saddr)
1558         return -TARGET_EFAULT;
1559     memcpy(target_saddr, addr, len);
1560     if (len >= offsetof(struct target_sockaddr, sa_family) +
1561         sizeof(target_saddr->sa_family)) {
1562         target_saddr->sa_family = tswap16(addr->sa_family);
1563     }
1564     if (addr->sa_family == AF_NETLINK &&
1565         len >= sizeof(struct target_sockaddr_nl)) {
1566         struct target_sockaddr_nl *target_nl =
1567                (struct target_sockaddr_nl *)target_saddr;
1568         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1569         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1570     } else if (addr->sa_family == AF_PACKET) {
1571         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1572         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1573         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1574     } else if (addr->sa_family == AF_INET6 &&
1575                len >= sizeof(struct target_sockaddr_in6)) {
1576         struct target_sockaddr_in6 *target_in6 =
1577                (struct target_sockaddr_in6 *)target_saddr;
1578         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1579     }
1580     unlock_user(target_saddr, target_addr, len);
1581 
1582     return 0;
1583 }
1584 
1585 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1586                                            struct target_msghdr *target_msgh)
1587 {
1588     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1589     abi_long msg_controllen;
1590     abi_ulong target_cmsg_addr;
1591     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1592     socklen_t space = 0;
1593 
1594     msg_controllen = tswapal(target_msgh->msg_controllen);
1595     if (msg_controllen < sizeof (struct target_cmsghdr))
1596         goto the_end;
1597     target_cmsg_addr = tswapal(target_msgh->msg_control);
1598     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1599     target_cmsg_start = target_cmsg;
1600     if (!target_cmsg)
1601         return -TARGET_EFAULT;
1602 
1603     while (cmsg && target_cmsg) {
1604         void *data = CMSG_DATA(cmsg);
1605         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1606 
1607         int len = tswapal(target_cmsg->cmsg_len)
1608             - sizeof(struct target_cmsghdr);
1609 
1610         space += CMSG_SPACE(len);
1611         if (space > msgh->msg_controllen) {
1612             space -= CMSG_SPACE(len);
1613             /* This is a QEMU bug, since we allocated the payload
1614              * area ourselves (unlike overflow in host-to-target
1615              * conversion, which is just the guest giving us a buffer
1616              * that's too small). It can't happen for the payload types
1617              * we currently support; if it becomes an issue in future
1618              * we would need to improve our allocation strategy to
1619              * something more intelligent than "twice the size of the
1620              * target buffer we're reading from".
1621              */
1622             qemu_log_mask(LOG_UNIMP,
1623                           ("Unsupported ancillary data %d/%d: "
1624                            "unhandled msg size\n"),
1625                           tswap32(target_cmsg->cmsg_level),
1626                           tswap32(target_cmsg->cmsg_type));
1627             break;
1628         }
1629 
1630         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1631             cmsg->cmsg_level = SOL_SOCKET;
1632         } else {
1633             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1634         }
1635         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1636         cmsg->cmsg_len = CMSG_LEN(len);
1637 
1638         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1639             int *fd = (int *)data;
1640             int *target_fd = (int *)target_data;
1641             int i, numfds = len / sizeof(int);
1642 
1643             for (i = 0; i < numfds; i++) {
1644                 __get_user(fd[i], target_fd + i);
1645             }
1646         } else if (cmsg->cmsg_level == SOL_SOCKET
1647                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1648             struct ucred *cred = (struct ucred *)data;
1649             struct target_ucred *target_cred =
1650                 (struct target_ucred *)target_data;
1651 
1652             __get_user(cred->pid, &target_cred->pid);
1653             __get_user(cred->uid, &target_cred->uid);
1654             __get_user(cred->gid, &target_cred->gid);
1655         } else {
1656             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1657                           cmsg->cmsg_level, cmsg->cmsg_type);
1658             memcpy(data, target_data, len);
1659         }
1660 
1661         cmsg = CMSG_NXTHDR(msgh, cmsg);
1662         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1663                                          target_cmsg_start);
1664     }
1665     unlock_user(target_cmsg, target_cmsg_addr, 0);
1666  the_end:
1667     msgh->msg_controllen = space;
1668     return 0;
1669 }
1670 
1671 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1672                                            struct msghdr *msgh)
1673 {
1674     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1675     abi_long msg_controllen;
1676     abi_ulong target_cmsg_addr;
1677     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1678     socklen_t space = 0;
1679 
1680     msg_controllen = tswapal(target_msgh->msg_controllen);
1681     if (msg_controllen < sizeof (struct target_cmsghdr))
1682         goto the_end;
1683     target_cmsg_addr = tswapal(target_msgh->msg_control);
1684     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1685     target_cmsg_start = target_cmsg;
1686     if (!target_cmsg)
1687         return -TARGET_EFAULT;
1688 
1689     while (cmsg && target_cmsg) {
1690         void *data = CMSG_DATA(cmsg);
1691         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1692 
1693         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1694         int tgt_len, tgt_space;
1695 
1696         /* We never copy a half-header but may copy half-data;
1697          * this is Linux's behaviour in put_cmsg(). Note that
1698          * truncation here is a guest problem (which we report
1699          * to the guest via the CTRUNC bit), unlike truncation
1700          * in target_to_host_cmsg, which is a QEMU bug.
1701          */
1702         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1703             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1704             break;
1705         }
1706 
1707         if (cmsg->cmsg_level == SOL_SOCKET) {
1708             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1709         } else {
1710             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1711         }
1712         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1713 
1714         /* Payload types which need a different size of payload on
1715          * the target must adjust tgt_len here.
1716          */
1717         tgt_len = len;
1718         switch (cmsg->cmsg_level) {
1719         case SOL_SOCKET:
1720             switch (cmsg->cmsg_type) {
1721             case SO_TIMESTAMP:
1722                 tgt_len = sizeof(struct target_timeval);
1723                 break;
1724             default:
1725                 break;
1726             }
1727             break;
1728         default:
1729             break;
1730         }
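        /* Worked example (annotation, not part of the original source): for a
         * SOL_SOCKET/SO_TIMESTAMP message received on a 64-bit host for a
         * 32-bit guest, the host payload is sizeof(struct timeval) == 16
         * bytes while the guest expects sizeof(struct target_timeval) == 8
         * bytes, so tgt_len is adjusted here before TARGET_CMSG_LEN() and the
         * copy-and-convert step below are applied.
         */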
1731 
1732         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1733             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1734             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1735         }
1736 
1737         /* We must now copy-and-convert len bytes of payload
1738          * into tgt_len bytes of destination space. Bear in mind
1739          * that in both source and destination we may be dealing
1740          * with a truncated value!
1741          */
1742         switch (cmsg->cmsg_level) {
1743         case SOL_SOCKET:
1744             switch (cmsg->cmsg_type) {
1745             case SCM_RIGHTS:
1746             {
1747                 int *fd = (int *)data;
1748                 int *target_fd = (int *)target_data;
1749                 int i, numfds = tgt_len / sizeof(int);
1750 
1751                 for (i = 0; i < numfds; i++) {
1752                     __put_user(fd[i], target_fd + i);
1753                 }
1754                 break;
1755             }
1756             case SO_TIMESTAMP:
1757             {
1758                 struct timeval *tv = (struct timeval *)data;
1759                 struct target_timeval *target_tv =
1760                     (struct target_timeval *)target_data;
1761 
1762                 if (len != sizeof(struct timeval) ||
1763                     tgt_len != sizeof(struct target_timeval)) {
1764                     goto unimplemented;
1765                 }
1766 
1767                 /* copy struct timeval to target */
1768                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1769                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1770                 break;
1771             }
1772             case SCM_CREDENTIALS:
1773             {
1774                 struct ucred *cred = (struct ucred *)data;
1775                 struct target_ucred *target_cred =
1776                     (struct target_ucred *)target_data;
1777 
1778                 __put_user(cred->pid, &target_cred->pid);
1779                 __put_user(cred->uid, &target_cred->uid);
1780                 __put_user(cred->gid, &target_cred->gid);
1781                 break;
1782             }
1783             default:
1784                 goto unimplemented;
1785             }
1786             break;
1787 
1788         case SOL_IP:
1789             switch (cmsg->cmsg_type) {
1790             case IP_TTL:
1791             {
1792                 uint32_t *v = (uint32_t *)data;
1793                 uint32_t *t_int = (uint32_t *)target_data;
1794 
1795                 if (len != sizeof(uint32_t) ||
1796                     tgt_len != sizeof(uint32_t)) {
1797                     goto unimplemented;
1798                 }
1799                 __put_user(*v, t_int);
1800                 break;
1801             }
1802             case IP_RECVERR:
1803             {
1804                 struct errhdr_t {
1805                    struct sock_extended_err ee;
1806                    struct sockaddr_in offender;
1807                 };
1808                 struct errhdr_t *errh = (struct errhdr_t *)data;
1809                 struct errhdr_t *target_errh =
1810                     (struct errhdr_t *)target_data;
1811 
1812                 if (len != sizeof(struct errhdr_t) ||
1813                     tgt_len != sizeof(struct errhdr_t)) {
1814                     goto unimplemented;
1815                 }
1816                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1817                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1818                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1819                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1820                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1821                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1822                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1823                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1824                     (void *) &errh->offender, sizeof(errh->offender));
1825                 break;
1826             }
1827             default:
1828                 goto unimplemented;
1829             }
1830             break;
1831 
1832         case SOL_IPV6:
1833             switch (cmsg->cmsg_type) {
1834             case IPV6_HOPLIMIT:
1835             {
1836                 uint32_t *v = (uint32_t *)data;
1837                 uint32_t *t_int = (uint32_t *)target_data;
1838 
1839                 if (len != sizeof(uint32_t) ||
1840                     tgt_len != sizeof(uint32_t)) {
1841                     goto unimplemented;
1842                 }
1843                 __put_user(*v, t_int);
1844                 break;
1845             }
1846             case IPV6_RECVERR:
1847             {
1848                 struct errhdr6_t {
1849                    struct sock_extended_err ee;
1850                    struct sockaddr_in6 offender;
1851                 };
1852                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1853                 struct errhdr6_t *target_errh =
1854                     (struct errhdr6_t *)target_data;
1855 
1856                 if (len != sizeof(struct errhdr6_t) ||
1857                     tgt_len != sizeof(struct errhdr6_t)) {
1858                     goto unimplemented;
1859                 }
1860                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1861                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1862                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1863                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1864                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1865                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1866                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1867                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1868                     (void *) &errh->offender, sizeof(errh->offender));
1869                 break;
1870             }
1871             default:
1872                 goto unimplemented;
1873             }
1874             break;
1875 
1876         default:
1877         unimplemented:
1878             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1879                           cmsg->cmsg_level, cmsg->cmsg_type);
1880             memcpy(target_data, data, MIN(len, tgt_len));
1881             if (tgt_len > len) {
1882                 memset(target_data + len, 0, tgt_len - len);
1883             }
1884         }
1885 
1886         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1887         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1888         if (msg_controllen < tgt_space) {
1889             tgt_space = msg_controllen;
1890         }
1891         msg_controllen -= tgt_space;
1892         space += tgt_space;
1893         cmsg = CMSG_NXTHDR(msgh, cmsg);
1894         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1895                                          target_cmsg_start);
1896     }
1897     unlock_user(target_cmsg, target_cmsg_addr, space);
1898  the_end:
1899     target_msgh->msg_controllen = tswapal(space);
1900     return 0;
1901 }
1902 
1903 /* do_setsockopt() must return target values and target errnos. */
1904 static abi_long do_setsockopt(int sockfd, int level, int optname,
1905                               abi_ulong optval_addr, socklen_t optlen)
1906 {
1907     abi_long ret;
1908     int val;
1909     struct ip_mreqn *ip_mreq;
1910     struct ip_mreq_source *ip_mreq_source;
1911 
1912     switch(level) {
1913     case SOL_TCP:
1914         /* TCP options all take an 'int' value.  */
1915         if (optlen < sizeof(uint32_t))
1916             return -TARGET_EINVAL;
1917 
1918         if (get_user_u32(val, optval_addr))
1919             return -TARGET_EFAULT;
1920         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1921         break;
1922     case SOL_IP:
1923         switch(optname) {
1924         case IP_TOS:
1925         case IP_TTL:
1926         case IP_HDRINCL:
1927         case IP_ROUTER_ALERT:
1928         case IP_RECVOPTS:
1929         case IP_RETOPTS:
1930         case IP_PKTINFO:
1931         case IP_MTU_DISCOVER:
1932         case IP_RECVERR:
1933         case IP_RECVTTL:
1934         case IP_RECVTOS:
1935 #ifdef IP_FREEBIND
1936         case IP_FREEBIND:
1937 #endif
1938         case IP_MULTICAST_TTL:
1939         case IP_MULTICAST_LOOP:
1940             val = 0;
1941             if (optlen >= sizeof(uint32_t)) {
1942                 if (get_user_u32(val, optval_addr))
1943                     return -TARGET_EFAULT;
1944             } else if (optlen >= 1) {
1945                 if (get_user_u8(val, optval_addr))
1946                     return -TARGET_EFAULT;
1947             }
1948             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1949             break;
1950         case IP_ADD_MEMBERSHIP:
1951         case IP_DROP_MEMBERSHIP:
1952             if (optlen < sizeof (struct target_ip_mreq) ||
1953                 optlen > sizeof (struct target_ip_mreqn))
1954                 return -TARGET_EINVAL;
1955 
1956             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1957             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1958             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1959             break;
1960 
1961         case IP_BLOCK_SOURCE:
1962         case IP_UNBLOCK_SOURCE:
1963         case IP_ADD_SOURCE_MEMBERSHIP:
1964         case IP_DROP_SOURCE_MEMBERSHIP:
1965             if (optlen != sizeof (struct target_ip_mreq_source))
1966                 return -TARGET_EINVAL;
1967 
1968             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1969             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1970             unlock_user(ip_mreq_source, optval_addr, 0);
1971             break;
1972 
1973         default:
1974             goto unimplemented;
1975         }
1976         break;
1977     case SOL_IPV6:
1978         switch (optname) {
1979         case IPV6_MTU_DISCOVER:
1980         case IPV6_MTU:
1981         case IPV6_V6ONLY:
1982         case IPV6_RECVPKTINFO:
1983         case IPV6_UNICAST_HOPS:
1984         case IPV6_MULTICAST_HOPS:
1985         case IPV6_MULTICAST_LOOP:
1986         case IPV6_RECVERR:
1987         case IPV6_RECVHOPLIMIT:
1988         case IPV6_2292HOPLIMIT:
1989         case IPV6_CHECKSUM:
1990         case IPV6_ADDRFORM:
1991         case IPV6_2292PKTINFO:
1992         case IPV6_RECVTCLASS:
1993         case IPV6_RECVRTHDR:
1994         case IPV6_2292RTHDR:
1995         case IPV6_RECVHOPOPTS:
1996         case IPV6_2292HOPOPTS:
1997         case IPV6_RECVDSTOPTS:
1998         case IPV6_2292DSTOPTS:
1999         case IPV6_TCLASS:
2000 #ifdef IPV6_RECVPATHMTU
2001         case IPV6_RECVPATHMTU:
2002 #endif
2003 #ifdef IPV6_TRANSPARENT
2004         case IPV6_TRANSPARENT:
2005 #endif
2006 #ifdef IPV6_FREEBIND
2007         case IPV6_FREEBIND:
2008 #endif
2009 #ifdef IPV6_RECVORIGDSTADDR
2010         case IPV6_RECVORIGDSTADDR:
2011 #endif
2012             val = 0;
2013             if (optlen < sizeof(uint32_t)) {
2014                 return -TARGET_EINVAL;
2015             }
2016             if (get_user_u32(val, optval_addr)) {
2017                 return -TARGET_EFAULT;
2018             }
2019             ret = get_errno(setsockopt(sockfd, level, optname,
2020                                        &val, sizeof(val)));
2021             break;
2022         case IPV6_PKTINFO:
2023         {
2024             struct in6_pktinfo pki;
2025 
2026             if (optlen < sizeof(pki)) {
2027                 return -TARGET_EINVAL;
2028             }
2029 
2030             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2031                 return -TARGET_EFAULT;
2032             }
2033 
2034             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2035 
2036             ret = get_errno(setsockopt(sockfd, level, optname,
2037                                        &pki, sizeof(pki)));
2038             break;
2039         }
2040         case IPV6_ADD_MEMBERSHIP:
2041         case IPV6_DROP_MEMBERSHIP:
2042         {
2043             struct ipv6_mreq ipv6mreq;
2044 
2045             if (optlen < sizeof(ipv6mreq)) {
2046                 return -TARGET_EINVAL;
2047             }
2048 
2049             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2050                 return -TARGET_EFAULT;
2051             }
2052 
2053             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2054 
2055             ret = get_errno(setsockopt(sockfd, level, optname,
2056                                        &ipv6mreq, sizeof(ipv6mreq)));
2057             break;
2058         }
2059         default:
2060             goto unimplemented;
2061         }
2062         break;
2063     case SOL_ICMPV6:
2064         switch (optname) {
2065         case ICMPV6_FILTER:
2066         {
2067             struct icmp6_filter icmp6f;
2068 
2069             if (optlen > sizeof(icmp6f)) {
2070                 optlen = sizeof(icmp6f);
2071             }
2072 
2073             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2074                 return -TARGET_EFAULT;
2075             }
2076 
2077             for (val = 0; val < 8; val++) {
2078                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2079             }
2080 
2081             ret = get_errno(setsockopt(sockfd, level, optname,
2082                                        &icmp6f, optlen));
2083             break;
2084         }
2085         default:
2086             goto unimplemented;
2087         }
2088         break;
2089     case SOL_RAW:
2090         switch (optname) {
2091         case ICMP_FILTER:
2092         case IPV6_CHECKSUM:
2093             /* these options take a u32 value */
2094             if (optlen < sizeof(uint32_t)) {
2095                 return -TARGET_EINVAL;
2096             }
2097 
2098             if (get_user_u32(val, optval_addr)) {
2099                 return -TARGET_EFAULT;
2100             }
2101             ret = get_errno(setsockopt(sockfd, level, optname,
2102                                        &val, sizeof(val)));
2103             break;
2104 
2105         default:
2106             goto unimplemented;
2107         }
2108         break;
2109 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2110     case SOL_ALG:
2111         switch (optname) {
2112         case ALG_SET_KEY:
2113         {
2114             char *alg_key = g_malloc(optlen);
2115 
2116             if (!alg_key) {
2117                 return -TARGET_ENOMEM;
2118             }
2119             if (copy_from_user(alg_key, optval_addr, optlen)) {
2120                 g_free(alg_key);
2121                 return -TARGET_EFAULT;
2122             }
2123             ret = get_errno(setsockopt(sockfd, level, optname,
2124                                        alg_key, optlen));
2125             g_free(alg_key);
2126             break;
2127         }
2128         case ALG_SET_AEAD_AUTHSIZE:
2129         {
2130             ret = get_errno(setsockopt(sockfd, level, optname,
2131                                        NULL, optlen));
2132             break;
2133         }
2134         default:
2135             goto unimplemented;
2136         }
2137         break;
2138 #endif
2139     case TARGET_SOL_SOCKET:
2140         switch (optname) {
2141         case TARGET_SO_RCVTIMEO:
2142         {
2143                 struct timeval tv;
2144 
2145                 optname = SO_RCVTIMEO;
2146 
2147 set_timeout:
2148                 if (optlen != sizeof(struct target_timeval)) {
2149                     return -TARGET_EINVAL;
2150                 }
2151 
2152                 if (copy_from_user_timeval(&tv, optval_addr)) {
2153                     return -TARGET_EFAULT;
2154                 }
2155 
2156                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2157                                 &tv, sizeof(tv)));
2158                 return ret;
2159         }
2160         case TARGET_SO_SNDTIMEO:
2161                 optname = SO_SNDTIMEO;
2162                 goto set_timeout;
2163         case TARGET_SO_ATTACH_FILTER:
2164         {
2165                 struct target_sock_fprog *tfprog;
2166                 struct target_sock_filter *tfilter;
2167                 struct sock_fprog fprog;
2168                 struct sock_filter *filter;
2169                 int i;
2170 
2171                 if (optlen != sizeof(*tfprog)) {
2172                     return -TARGET_EINVAL;
2173                 }
2174                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2175                     return -TARGET_EFAULT;
2176                 }
2177                 if (!lock_user_struct(VERIFY_READ, tfilter,
2178                                       tswapal(tfprog->filter), 0)) {
2179                     unlock_user_struct(tfprog, optval_addr, 1);
2180                     return -TARGET_EFAULT;
2181                 }
2182 
2183                 fprog.len = tswap16(tfprog->len);
2184                 filter = g_try_new(struct sock_filter, fprog.len);
2185                 if (filter == NULL) {
2186                     unlock_user_struct(tfilter, tfprog->filter, 1);
2187                     unlock_user_struct(tfprog, optval_addr, 1);
2188                     return -TARGET_ENOMEM;
2189                 }
2190                 for (i = 0; i < fprog.len; i++) {
2191                     filter[i].code = tswap16(tfilter[i].code);
2192                     filter[i].jt = tfilter[i].jt;
2193                     filter[i].jf = tfilter[i].jf;
2194                     filter[i].k = tswap32(tfilter[i].k);
2195                 }
2196                 fprog.filter = filter;
2197 
2198                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2199                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2200                 g_free(filter);
2201 
2202                 unlock_user_struct(tfilter, tfprog->filter, 1);
2203                 unlock_user_struct(tfprog, optval_addr, 1);
2204                 return ret;
2205         }
2206         case TARGET_SO_BINDTODEVICE:
2207         {
2208                 char *dev_ifname, *addr_ifname;
2209 
2210                 if (optlen > IFNAMSIZ - 1) {
2211                     optlen = IFNAMSIZ - 1;
2212                 }
2213                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2214                 if (!dev_ifname) {
2215                     return -TARGET_EFAULT;
2216                 }
2217                 optname = SO_BINDTODEVICE;
2218                 addr_ifname = alloca(IFNAMSIZ);
2219                 memcpy(addr_ifname, dev_ifname, optlen);
2220                 addr_ifname[optlen] = 0;
2221                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2222                                            addr_ifname, optlen));
2223                 unlock_user(dev_ifname, optval_addr, 0);
2224                 return ret;
2225         }
2226         case TARGET_SO_LINGER:
2227         {
2228                 struct linger lg;
2229                 struct target_linger *tlg;
2230 
2231                 if (optlen != sizeof(struct target_linger)) {
2232                     return -TARGET_EINVAL;
2233                 }
2234                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2235                     return -TARGET_EFAULT;
2236                 }
2237                 __get_user(lg.l_onoff, &tlg->l_onoff);
2238                 __get_user(lg.l_linger, &tlg->l_linger);
2239                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2240                                 &lg, sizeof(lg)));
2241                 unlock_user_struct(tlg, optval_addr, 0);
2242                 return ret;
2243         }
2244             /* Options with 'int' argument.  */
2245         case TARGET_SO_DEBUG:
2246                 optname = SO_DEBUG;
2247                 break;
2248         case TARGET_SO_REUSEADDR:
2249                 optname = SO_REUSEADDR;
2250                 break;
2251 #ifdef SO_REUSEPORT
2252         case TARGET_SO_REUSEPORT:
2253                 optname = SO_REUSEPORT;
2254                 break;
2255 #endif
2256         case TARGET_SO_TYPE:
2257                 optname = SO_TYPE;
2258                 break;
2259         case TARGET_SO_ERROR:
2260                 optname = SO_ERROR;
2261                 break;
2262         case TARGET_SO_DONTROUTE:
2263                 optname = SO_DONTROUTE;
2264                 break;
2265         case TARGET_SO_BROADCAST:
2266                 optname = SO_BROADCAST;
2267                 break;
2268         case TARGET_SO_SNDBUF:
2269                 optname = SO_SNDBUF;
2270                 break;
2271         case TARGET_SO_SNDBUFFORCE:
2272                 optname = SO_SNDBUFFORCE;
2273                 break;
2274         case TARGET_SO_RCVBUF:
2275                 optname = SO_RCVBUF;
2276                 break;
2277         case TARGET_SO_RCVBUFFORCE:
2278                 optname = SO_RCVBUFFORCE;
2279                 break;
2280         case TARGET_SO_KEEPALIVE:
2281                 optname = SO_KEEPALIVE;
2282                 break;
2283         case TARGET_SO_OOBINLINE:
2284                 optname = SO_OOBINLINE;
2285                 break;
2286         case TARGET_SO_NO_CHECK:
2287                 optname = SO_NO_CHECK;
2288                 break;
2289         case TARGET_SO_PRIORITY:
2290                 optname = SO_PRIORITY;
2291                 break;
2292 #ifdef SO_BSDCOMPAT
2293         case TARGET_SO_BSDCOMPAT:
2294                 optname = SO_BSDCOMPAT;
2295                 break;
2296 #endif
2297         case TARGET_SO_PASSCRED:
2298                 optname = SO_PASSCRED;
2299                 break;
2300         case TARGET_SO_PASSSEC:
2301                 optname = SO_PASSSEC;
2302                 break;
2303         case TARGET_SO_TIMESTAMP:
2304                 optname = SO_TIMESTAMP;
2305                 break;
2306         case TARGET_SO_RCVLOWAT:
2307                 optname = SO_RCVLOWAT;
2308                 break;
2309         default:
2310             goto unimplemented;
2311         }
2312         if (optlen < sizeof(uint32_t))
2313             return -TARGET_EINVAL;
2314 
2315         if (get_user_u32(val, optval_addr))
2316             return -TARGET_EFAULT;
2317         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2318         break;
2319 #ifdef SOL_NETLINK
2320     case SOL_NETLINK:
2321         switch (optname) {
2322         case NETLINK_PKTINFO:
2323         case NETLINK_ADD_MEMBERSHIP:
2324         case NETLINK_DROP_MEMBERSHIP:
2325         case NETLINK_BROADCAST_ERROR:
2326         case NETLINK_NO_ENOBUFS:
2327 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2328         case NETLINK_LISTEN_ALL_NSID:
2329         case NETLINK_CAP_ACK:
2330 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2331 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2332         case NETLINK_EXT_ACK:
2333 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2334 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2335         case NETLINK_GET_STRICT_CHK:
2336 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2337             break;
2338         default:
2339             goto unimplemented;
2340         }
2341         val = 0;
2342         if (optlen < sizeof(uint32_t)) {
2343             return -TARGET_EINVAL;
2344         }
2345         if (get_user_u32(val, optval_addr)) {
2346             return -TARGET_EFAULT;
2347         }
2348         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2349                                    sizeof(val)));
2350         break;
2351 #endif /* SOL_NETLINK */
2352     default:
2353     unimplemented:
2354         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2355                       level, optname);
2356         ret = -TARGET_ENOPROTOOPT;
2357     }
2358     return ret;
2359 }
2360 
2361 /* do_getsockopt() must return target values and target errnos. */
2362 static abi_long do_getsockopt(int sockfd, int level, int optname,
2363                               abi_ulong optval_addr, abi_ulong optlen)
2364 {
2365     abi_long ret;
2366     int len, val;
2367     socklen_t lv;
2368 
2369     switch(level) {
2370     case TARGET_SOL_SOCKET:
2371         level = SOL_SOCKET;
2372         switch (optname) {
2373         /* These don't just return a single integer */
2374         case TARGET_SO_PEERNAME:
2375             goto unimplemented;
2376         case TARGET_SO_RCVTIMEO: {
2377             struct timeval tv;
2378             socklen_t tvlen;
2379 
2380             optname = SO_RCVTIMEO;
2381 
2382 get_timeout:
2383             if (get_user_u32(len, optlen)) {
2384                 return -TARGET_EFAULT;
2385             }
2386             if (len < 0) {
2387                 return -TARGET_EINVAL;
2388             }
2389 
2390             tvlen = sizeof(tv);
2391             ret = get_errno(getsockopt(sockfd, level, optname,
2392                                        &tv, &tvlen));
2393             if (ret < 0) {
2394                 return ret;
2395             }
2396             if (len > sizeof(struct target_timeval)) {
2397                 len = sizeof(struct target_timeval);
2398             }
2399             if (copy_to_user_timeval(optval_addr, &tv)) {
2400                 return -TARGET_EFAULT;
2401             }
2402             if (put_user_u32(len, optlen)) {
2403                 return -TARGET_EFAULT;
2404             }
2405             break;
2406         }
2407         case TARGET_SO_SNDTIMEO:
2408             optname = SO_SNDTIMEO;
2409             goto get_timeout;
2410         case TARGET_SO_PEERCRED: {
2411             struct ucred cr;
2412             socklen_t crlen;
2413             struct target_ucred *tcr;
2414 
2415             if (get_user_u32(len, optlen)) {
2416                 return -TARGET_EFAULT;
2417             }
2418             if (len < 0) {
2419                 return -TARGET_EINVAL;
2420             }
2421 
2422             crlen = sizeof(cr);
2423             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2424                                        &cr, &crlen));
2425             if (ret < 0) {
2426                 return ret;
2427             }
2428             if (len > crlen) {
2429                 len = crlen;
2430             }
2431             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2432                 return -TARGET_EFAULT;
2433             }
2434             __put_user(cr.pid, &tcr->pid);
2435             __put_user(cr.uid, &tcr->uid);
2436             __put_user(cr.gid, &tcr->gid);
2437             unlock_user_struct(tcr, optval_addr, 1);
2438             if (put_user_u32(len, optlen)) {
2439                 return -TARGET_EFAULT;
2440             }
2441             break;
2442         }
2443         case TARGET_SO_PEERSEC: {
2444             char *name;
2445 
2446             if (get_user_u32(len, optlen)) {
2447                 return -TARGET_EFAULT;
2448             }
2449             if (len < 0) {
2450                 return -TARGET_EINVAL;
2451             }
2452             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2453             if (!name) {
2454                 return -TARGET_EFAULT;
2455             }
2456             lv = len;
2457             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2458                                        name, &lv));
2459             if (put_user_u32(lv, optlen)) {
2460                 ret = -TARGET_EFAULT;
2461             }
2462             unlock_user(name, optval_addr, lv);
2463             break;
2464         }
2465         case TARGET_SO_LINGER:
2466         {
2467             struct linger lg;
2468             socklen_t lglen;
2469             struct target_linger *tlg;
2470 
2471             if (get_user_u32(len, optlen)) {
2472                 return -TARGET_EFAULT;
2473             }
2474             if (len < 0) {
2475                 return -TARGET_EINVAL;
2476             }
2477 
2478             lglen = sizeof(lg);
2479             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2480                                        &lg, &lglen));
2481             if (ret < 0) {
2482                 return ret;
2483             }
2484             if (len > lglen) {
2485                 len = lglen;
2486             }
2487             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2488                 return -TARGET_EFAULT;
2489             }
2490             __put_user(lg.l_onoff, &tlg->l_onoff);
2491             __put_user(lg.l_linger, &tlg->l_linger);
2492             unlock_user_struct(tlg, optval_addr, 1);
2493             if (put_user_u32(len, optlen)) {
2494                 return -TARGET_EFAULT;
2495             }
2496             break;
2497         }
2498         /* Options with 'int' argument.  */
2499         case TARGET_SO_DEBUG:
2500             optname = SO_DEBUG;
2501             goto int_case;
2502         case TARGET_SO_REUSEADDR:
2503             optname = SO_REUSEADDR;
2504             goto int_case;
2505 #ifdef SO_REUSEPORT
2506         case TARGET_SO_REUSEPORT:
2507             optname = SO_REUSEPORT;
2508             goto int_case;
2509 #endif
2510         case TARGET_SO_TYPE:
2511             optname = SO_TYPE;
2512             goto int_case;
2513         case TARGET_SO_ERROR:
2514             optname = SO_ERROR;
2515             goto int_case;
2516         case TARGET_SO_DONTROUTE:
2517             optname = SO_DONTROUTE;
2518             goto int_case;
2519         case TARGET_SO_BROADCAST:
2520             optname = SO_BROADCAST;
2521             goto int_case;
2522         case TARGET_SO_SNDBUF:
2523             optname = SO_SNDBUF;
2524             goto int_case;
2525         case TARGET_SO_RCVBUF:
2526             optname = SO_RCVBUF;
2527             goto int_case;
2528         case TARGET_SO_KEEPALIVE:
2529             optname = SO_KEEPALIVE;
2530             goto int_case;
2531         case TARGET_SO_OOBINLINE:
2532             optname = SO_OOBINLINE;
2533             goto int_case;
2534         case TARGET_SO_NO_CHECK:
2535             optname = SO_NO_CHECK;
2536             goto int_case;
2537         case TARGET_SO_PRIORITY:
2538             optname = SO_PRIORITY;
2539             goto int_case;
2540 #ifdef SO_BSDCOMPAT
2541         case TARGET_SO_BSDCOMPAT:
2542             optname = SO_BSDCOMPAT;
2543             goto int_case;
2544 #endif
2545         case TARGET_SO_PASSCRED:
2546             optname = SO_PASSCRED;
2547             goto int_case;
2548         case TARGET_SO_TIMESTAMP:
2549             optname = SO_TIMESTAMP;
2550             goto int_case;
2551         case TARGET_SO_RCVLOWAT:
2552             optname = SO_RCVLOWAT;
2553             goto int_case;
2554         case TARGET_SO_ACCEPTCONN:
2555             optname = SO_ACCEPTCONN;
2556             goto int_case;
2557         default:
2558             goto int_case;
2559         }
2560         break;
2561     case SOL_TCP:
2562         /* TCP options all take an 'int' value.  */
2563     int_case:
2564         if (get_user_u32(len, optlen))
2565             return -TARGET_EFAULT;
2566         if (len < 0)
2567             return -TARGET_EINVAL;
2568         lv = sizeof(lv);
2569         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2570         if (ret < 0)
2571             return ret;
2572         if (optname == SO_TYPE) {
2573             val = host_to_target_sock_type(val);
2574         }
2575         if (len > lv)
2576             len = lv;
2577         if (len == 4) {
2578             if (put_user_u32(val, optval_addr))
2579                 return -TARGET_EFAULT;
2580         } else {
2581             if (put_user_u8(val, optval_addr))
2582                 return -TARGET_EFAULT;
2583         }
2584         if (put_user_u32(len, optlen))
2585             return -TARGET_EFAULT;
2586         break;
2587     case SOL_IP:
2588         switch(optname) {
2589         case IP_TOS:
2590         case IP_TTL:
2591         case IP_HDRINCL:
2592         case IP_ROUTER_ALERT:
2593         case IP_RECVOPTS:
2594         case IP_RETOPTS:
2595         case IP_PKTINFO:
2596         case IP_MTU_DISCOVER:
2597         case IP_RECVERR:
2598         case IP_RECVTOS:
2599 #ifdef IP_FREEBIND
2600         case IP_FREEBIND:
2601 #endif
2602         case IP_MULTICAST_TTL:
2603         case IP_MULTICAST_LOOP:
2604             if (get_user_u32(len, optlen))
2605                 return -TARGET_EFAULT;
2606             if (len < 0)
2607                 return -TARGET_EINVAL;
2608             lv = sizeof(lv);
2609             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2610             if (ret < 0)
2611                 return ret;
2612             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2613                 len = 1;
2614                 if (put_user_u32(len, optlen)
2615                     || put_user_u8(val, optval_addr))
2616                     return -TARGET_EFAULT;
2617             } else {
2618                 if (len > sizeof(int))
2619                     len = sizeof(int);
2620                 if (put_user_u32(len, optlen)
2621                     || put_user_u32(val, optval_addr))
2622                     return -TARGET_EFAULT;
2623             }
2624             break;
2625         default:
2626             ret = -TARGET_ENOPROTOOPT;
2627             break;
2628         }
2629         break;
2630     case SOL_IPV6:
2631         switch (optname) {
2632         case IPV6_MTU_DISCOVER:
2633         case IPV6_MTU:
2634         case IPV6_V6ONLY:
2635         case IPV6_RECVPKTINFO:
2636         case IPV6_UNICAST_HOPS:
2637         case IPV6_MULTICAST_HOPS:
2638         case IPV6_MULTICAST_LOOP:
2639         case IPV6_RECVERR:
2640         case IPV6_RECVHOPLIMIT:
2641         case IPV6_2292HOPLIMIT:
2642         case IPV6_CHECKSUM:
2643         case IPV6_ADDRFORM:
2644         case IPV6_2292PKTINFO:
2645         case IPV6_RECVTCLASS:
2646         case IPV6_RECVRTHDR:
2647         case IPV6_2292RTHDR:
2648         case IPV6_RECVHOPOPTS:
2649         case IPV6_2292HOPOPTS:
2650         case IPV6_RECVDSTOPTS:
2651         case IPV6_2292DSTOPTS:
2652         case IPV6_TCLASS:
2653 #ifdef IPV6_RECVPATHMTU
2654         case IPV6_RECVPATHMTU:
2655 #endif
2656 #ifdef IPV6_TRANSPARENT
2657         case IPV6_TRANSPARENT:
2658 #endif
2659 #ifdef IPV6_FREEBIND
2660         case IPV6_FREEBIND:
2661 #endif
2662 #ifdef IPV6_RECVORIGDSTADDR
2663         case IPV6_RECVORIGDSTADDR:
2664 #endif
2665             if (get_user_u32(len, optlen))
2666                 return -TARGET_EFAULT;
2667             if (len < 0)
2668                 return -TARGET_EINVAL;
2669             lv = sizeof(lv);
2670             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2671             if (ret < 0)
2672                 return ret;
2673             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2674                 len = 1;
2675                 if (put_user_u32(len, optlen)
2676                     || put_user_u8(val, optval_addr))
2677                     return -TARGET_EFAULT;
2678             } else {
2679                 if (len > sizeof(int))
2680                     len = sizeof(int);
2681                 if (put_user_u32(len, optlen)
2682                     || put_user_u32(val, optval_addr))
2683                     return -TARGET_EFAULT;
2684             }
2685             break;
2686         default:
2687             ret = -TARGET_ENOPROTOOPT;
2688             break;
2689         }
2690         break;
2691 #ifdef SOL_NETLINK
2692     case SOL_NETLINK:
2693         switch (optname) {
2694         case NETLINK_PKTINFO:
2695         case NETLINK_BROADCAST_ERROR:
2696         case NETLINK_NO_ENOBUFS:
2697 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2698         case NETLINK_LISTEN_ALL_NSID:
2699         case NETLINK_CAP_ACK:
2700 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2701 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2702         case NETLINK_EXT_ACK:
2703 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2704 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2705         case NETLINK_GET_STRICT_CHK:
2706 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2707             if (get_user_u32(len, optlen)) {
2708                 return -TARGET_EFAULT;
2709             }
2710             if (len != sizeof(val)) {
2711                 return -TARGET_EINVAL;
2712             }
2713             lv = len;
2714             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2715             if (ret < 0) {
2716                 return ret;
2717             }
2718             if (put_user_u32(lv, optlen)
2719                 || put_user_u32(val, optval_addr)) {
2720                 return -TARGET_EFAULT;
2721             }
2722             break;
2723 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2724         case NETLINK_LIST_MEMBERSHIPS:
2725         {
2726             uint32_t *results;
2727             int i;
2728             if (get_user_u32(len, optlen)) {
2729                 return -TARGET_EFAULT;
2730             }
2731             if (len < 0) {
2732                 return -TARGET_EINVAL;
2733             }
2734             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2735             if (!results) {
2736                 return -TARGET_EFAULT;
2737             }
2738             lv = len;
2739             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2740             if (ret < 0) {
2741                 unlock_user(results, optval_addr, 0);
2742                 return ret;
2743             }
2744             /* swap host endianness to target endianness. */
2745             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2746                 results[i] = tswap32(results[i]);
2747             }
2748             if (put_user_u32(lv, optlen)) {
2749                 return -TARGET_EFAULT;
2750             }
2751             unlock_user(results, optval_addr, 0);
2752             break;
2753         }
2754 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2755         default:
2756             goto unimplemented;
2757         }
2758         break;
2759 #endif /* SOL_NETLINK */
2760     default:
2761     unimplemented:
2762         qemu_log_mask(LOG_UNIMP,
2763                       "getsockopt level=%d optname=%d not yet supported\n",
2764                       level, optname);
2765         ret = -TARGET_EOPNOTSUPP;
2766         break;
2767     }
2768     return ret;
2769 }
2770 
2771 /* Convert target low/high pair representing file offset into the host
2772  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2773  * as the kernel doesn't handle them either.
2774  */
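/* Worked example (annotation, not part of the original source): with a
 * 32-bit target, tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef.  On a 64-bit host that yields
 * *hlow = 0x0123456789abcdef and *hhigh = 0; on a 32-bit host it yields
 * *hlow = 0x89abcdef and *hhigh = 0x01234567.
 */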
2775 static void target_to_host_low_high(abi_ulong tlow,
2776                                     abi_ulong thigh,
2777                                     unsigned long *hlow,
2778                                     unsigned long *hhigh)
2779 {
2780     uint64_t off = tlow |
2781         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2782         TARGET_LONG_BITS / 2;
2783 
2784     *hlow = off;
2785     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2786 }
2787 
2788 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2789                                 abi_ulong count, int copy)
2790 {
2791     struct target_iovec *target_vec;
2792     struct iovec *vec;
2793     abi_ulong total_len, max_len;
2794     int i;
2795     int err = 0;
2796     bool bad_address = false;
2797 
2798     if (count == 0) {
2799         errno = 0;
2800         return NULL;
2801     }
2802     if (count > IOV_MAX) {
2803         errno = EINVAL;
2804         return NULL;
2805     }
2806 
2807     vec = g_try_new0(struct iovec, count);
2808     if (vec == NULL) {
2809         errno = ENOMEM;
2810         return NULL;
2811     }
2812 
2813     target_vec = lock_user(VERIFY_READ, target_addr,
2814                            count * sizeof(struct target_iovec), 1);
2815     if (target_vec == NULL) {
2816         err = EFAULT;
2817         goto fail2;
2818     }
2819 
2820     /* ??? If host page size > target page size, this will result in a
2821        value larger than what we can actually support.  */
2822     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2823     total_len = 0;
2824 
2825     for (i = 0; i < count; i++) {
2826         abi_ulong base = tswapal(target_vec[i].iov_base);
2827         abi_long len = tswapal(target_vec[i].iov_len);
2828 
2829         if (len < 0) {
2830             err = EINVAL;
2831             goto fail;
2832         } else if (len == 0) {
2833             /* Zero length pointer is ignored.  */
2834             vec[i].iov_base = 0;
2835         } else {
2836             vec[i].iov_base = lock_user(type, base, len, copy);
2837             /* If the first buffer pointer is bad, this is a fault.  But
2838              * subsequent bad buffers will result in a partial write; this
2839              * is realized by filling the vector with null pointers and
2840              * zero lengths. */
2841             if (!vec[i].iov_base) {
2842                 if (i == 0) {
2843                     err = EFAULT;
2844                     goto fail;
2845                 } else {
2846                     bad_address = true;
2847                 }
2848             }
2849             if (bad_address) {
2850                 len = 0;
2851             }
2852             if (len > max_len - total_len) {
2853                 len = max_len - total_len;
2854             }
2855         }
2856         vec[i].iov_len = len;
2857         total_len += len;
2858     }
2859 
2860     unlock_user(target_vec, target_addr, 0);
2861     return vec;
2862 
2863  fail:
2864     while (--i >= 0) {
2865         if (tswapal(target_vec[i].iov_len) > 0) {
2866             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2867         }
2868     }
2869     unlock_user(target_vec, target_addr, 0);
2870  fail2:
2871     g_free(vec);
2872     errno = err;
2873     return NULL;
2874 }
2875 
2876 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2877                          abi_ulong count, int copy)
2878 {
2879     struct target_iovec *target_vec;
2880     int i;
2881 
2882     target_vec = lock_user(VERIFY_READ, target_addr,
2883                            count * sizeof(struct target_iovec), 1);
2884     if (target_vec) {
2885         for (i = 0; i < count; i++) {
2886             abi_ulong base = tswapal(target_vec[i].iov_base);
2887             abi_long len = tswapal(target_vec[i].iov_len);
2888             if (len < 0) {
2889                 break;
2890             }
2891             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2892         }
2893         unlock_user(target_vec, target_addr, 0);
2894     }
2895 
2896     g_free(vec);
2897 }
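/* Usage note (annotation, not part of the original source): lock_iovec() and
 * unlock_iovec() are used as a pair.  On failure lock_iovec() returns NULL
 * with errno set to a host errno, which callers translate via
 * host_to_target_errno() (see do_sendrecvmsg_locked() below); on success the
 * vector must later be released with unlock_iovec(), passing copy != 0 when
 * the host data should be copied back to the guest buffers.
 */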
2898 
2899 static inline int target_to_host_sock_type(int *type)
2900 {
2901     int host_type = 0;
2902     int target_type = *type;
2903 
2904     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2905     case TARGET_SOCK_DGRAM:
2906         host_type = SOCK_DGRAM;
2907         break;
2908     case TARGET_SOCK_STREAM:
2909         host_type = SOCK_STREAM;
2910         break;
2911     default:
2912         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2913         break;
2914     }
2915     if (target_type & TARGET_SOCK_CLOEXEC) {
2916 #if defined(SOCK_CLOEXEC)
2917         host_type |= SOCK_CLOEXEC;
2918 #else
2919         return -TARGET_EINVAL;
2920 #endif
2921     }
2922     if (target_type & TARGET_SOCK_NONBLOCK) {
2923 #if defined(SOCK_NONBLOCK)
2924         host_type |= SOCK_NONBLOCK;
2925 #elif !defined(O_NONBLOCK)
2926         return -TARGET_EINVAL;
2927 #endif
2928     }
2929     *type = host_type;
2930     return 0;
2931 }
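/* Example (annotation, not part of the original source): a guest request for
 * TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK maps to SOCK_STREAM plus
 * SOCK_NONBLOCK when the host defines it; otherwise the non-blocking flag is
 * emulated after socket creation by sock_flags_fixup() below using
 * fcntl(F_SETFL, O_NONBLOCK).
 */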
2932 
2933 /* Try to emulate socket type flags after socket creation.  */
2934 static int sock_flags_fixup(int fd, int target_type)
2935 {
2936 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2937     if (target_type & TARGET_SOCK_NONBLOCK) {
2938         int flags = fcntl(fd, F_GETFL);
2939         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2940             close(fd);
2941             return -TARGET_EINVAL;
2942         }
2943     }
2944 #endif
2945     return fd;
2946 }
2947 
2948 /* do_socket() must return target values and target errnos. */
2949 static abi_long do_socket(int domain, int type, int protocol)
2950 {
2951     int target_type = type;
2952     int ret;
2953 
2954     ret = target_to_host_sock_type(&type);
2955     if (ret) {
2956         return ret;
2957     }
2958 
2959     if (domain == PF_NETLINK && !(
2960 #ifdef CONFIG_RTNETLINK
2961          protocol == NETLINK_ROUTE ||
2962 #endif
2963          protocol == NETLINK_KOBJECT_UEVENT ||
2964          protocol == NETLINK_AUDIT)) {
2965         return -TARGET_EPROTONOSUPPORT;
2966     }
2967 
2968     if (domain == AF_PACKET ||
2969         (domain == AF_INET && type == SOCK_PACKET)) {
2970         protocol = tswap16(protocol);
2971     }
2972 
2973     ret = get_errno(socket(domain, type, protocol));
2974     if (ret >= 0) {
2975         ret = sock_flags_fixup(ret, target_type);
2976         if (type == SOCK_PACKET) {
2977             /* Handle an obsolete case:
2978              * if the socket type is SOCK_PACKET, bind by name.
2979              */
2980             fd_trans_register(ret, &target_packet_trans);
2981         } else if (domain == PF_NETLINK) {
2982             switch (protocol) {
2983 #ifdef CONFIG_RTNETLINK
2984             case NETLINK_ROUTE:
2985                 fd_trans_register(ret, &target_netlink_route_trans);
2986                 break;
2987 #endif
2988             case NETLINK_KOBJECT_UEVENT:
2989                 /* nothing to do: messages are strings */
2990                 break;
2991             case NETLINK_AUDIT:
2992                 fd_trans_register(ret, &target_netlink_audit_trans);
2993                 break;
2994             default:
2995                 g_assert_not_reached();
2996             }
2997         }
2998     }
2999     return ret;
3000 }
3001 
3002 /* do_bind() must return target values and target errnos. */
3003 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3004                         socklen_t addrlen)
3005 {
3006     void *addr;
3007     abi_long ret;
3008 
3009     if ((int)addrlen < 0) {
3010         return -TARGET_EINVAL;
3011     }
3012 
3013     addr = alloca(addrlen+1);
3014 
3015     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3016     if (ret)
3017         return ret;
3018 
3019     return get_errno(bind(sockfd, addr, addrlen));
3020 }
3021 
3022 /* do_connect() must return target values and target errnos. */
3023 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3024                            socklen_t addrlen)
3025 {
3026     void *addr;
3027     abi_long ret;
3028 
3029     if ((int)addrlen < 0) {
3030         return -TARGET_EINVAL;
3031     }
3032 
3033     addr = alloca(addrlen+1);
3034 
3035     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3036     if (ret)
3037         return ret;
3038 
3039     return get_errno(safe_connect(sockfd, addr, addrlen));
3040 }
3041 
3042 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3043 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3044                                       int flags, int send)
3045 {
3046     abi_long ret, len;
3047     struct msghdr msg;
3048     abi_ulong count;
3049     struct iovec *vec;
3050     abi_ulong target_vec;
3051 
3052     if (msgp->msg_name) {
3053         msg.msg_namelen = tswap32(msgp->msg_namelen);
3054         msg.msg_name = alloca(msg.msg_namelen+1);
3055         ret = target_to_host_sockaddr(fd, msg.msg_name,
3056                                       tswapal(msgp->msg_name),
3057                                       msg.msg_namelen);
3058         if (ret == -TARGET_EFAULT) {
3059             /* For connected sockets msg_name and msg_namelen must
3060              * be ignored, so returning EFAULT immediately is wrong.
3061              * Instead, pass a bad msg_name to the host kernel, and
3062              * let it decide whether to return EFAULT or not.
3063              */
3064             msg.msg_name = (void *)-1;
3065         } else if (ret) {
3066             goto out2;
3067         }
3068     } else {
3069         msg.msg_name = NULL;
3070         msg.msg_namelen = 0;
3071     }
3072     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3073     msg.msg_control = alloca(msg.msg_controllen);
3074     memset(msg.msg_control, 0, msg.msg_controllen);
3075 
3076     msg.msg_flags = tswap32(msgp->msg_flags);
3077 
3078     count = tswapal(msgp->msg_iovlen);
3079     target_vec = tswapal(msgp->msg_iov);
3080 
3081     if (count > IOV_MAX) {
3082         /* sendmsg/recvmsg return a different errno for this condition than
3083          * readv/writev, so we must catch it here before lock_iovec() does.
3084          */
3085         ret = -TARGET_EMSGSIZE;
3086         goto out2;
3087     }
3088 
3089     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3090                      target_vec, count, send);
3091     if (vec == NULL) {
3092         ret = -host_to_target_errno(errno);
3093         goto out2;
3094     }
3095     msg.msg_iovlen = count;
3096     msg.msg_iov = vec;
3097 
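    /*
     * Added note: when a data translator is registered for this fd, the send
     * path below copies and converts only the first iovec element before
     * calling sendmsg; any further iovec elements are passed through
     * untranslated.
     */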
3098     if (send) {
3099         if (fd_trans_target_to_host_data(fd)) {
3100             void *host_msg;
3101 
3102             host_msg = g_malloc(msg.msg_iov->iov_len);
3103             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3104             ret = fd_trans_target_to_host_data(fd)(host_msg,
3105                                                    msg.msg_iov->iov_len);
3106             if (ret >= 0) {
3107                 msg.msg_iov->iov_base = host_msg;
3108                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3109             }
3110             g_free(host_msg);
3111         } else {
3112             ret = target_to_host_cmsg(&msg, msgp);
3113             if (ret == 0) {
3114                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3115             }
3116         }
3117     } else {
3118         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3119         if (!is_error(ret)) {
3120             len = ret;
3121             if (fd_trans_host_to_target_data(fd)) {
3122                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3123                                                MIN(msg.msg_iov->iov_len, len));
3124             } else {
3125                 ret = host_to_target_cmsg(msgp, &msg);
3126             }
3127             if (!is_error(ret)) {
3128                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3129                 msgp->msg_flags = tswap32(msg.msg_flags);
3130                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3131                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3132                                     msg.msg_name, msg.msg_namelen);
3133                     if (ret) {
3134                         goto out;
3135                     }
3136                 }
3137 
3138                 ret = len;
3139             }
3140         }
3141     }
3142 
3143 out:
3144     unlock_iovec(vec, target_vec, count, !send);
3145 out2:
3146     return ret;
3147 }
3148 
3149 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3150                                int flags, int send)
3151 {
3152     abi_long ret;
3153     struct target_msghdr *msgp;
3154 
3155     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3156                           msgp,
3157                           target_msg,
3158                           send ? 1 : 0)) {
3159         return -TARGET_EFAULT;
3160     }
3161     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3162     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3163     return ret;
3164 }
3165 
3166 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3167  * so it might not have this *mmsg-specific flag either.
3168  */
3169 #ifndef MSG_WAITFORONE
3170 #define MSG_WAITFORONE 0x10000
3171 #endif
3172 
3173 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3174                                 unsigned int vlen, unsigned int flags,
3175                                 int send)
3176 {
3177     struct target_mmsghdr *mmsgp;
3178     abi_long ret = 0;
3179     int i;
3180 
3181     if (vlen > UIO_MAXIOV) {
3182         vlen = UIO_MAXIOV;
3183     }
3184 
3185     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3186     if (!mmsgp) {
3187         return -TARGET_EFAULT;
3188     }
3189 
3190     for (i = 0; i < vlen; i++) {
3191         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3192         if (is_error(ret)) {
3193             break;
3194         }
3195         mmsgp[i].msg_len = tswap32(ret);
3196         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3197         if (flags & MSG_WAITFORONE) {
3198             flags |= MSG_DONTWAIT;
3199         }
3200     }
3201 
3202     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3203 
3204     /* Return the number of datagrams sent or received if we handled any
3205      * at all; otherwise return the error.
3206      */
3207     if (i) {
3208         return i;
3209     }
3210     return ret;
3211 }
3212 
3213 /* do_accept4() must return target values and target errnos. */
3214 static abi_long do_accept4(int fd, abi_ulong target_addr,
3215                            abi_ulong target_addrlen_addr, int flags)
3216 {
3217     socklen_t addrlen, ret_addrlen;
3218     void *addr;
3219     abi_long ret;
3220     int host_flags;
3221 
3222     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3223 
3224     if (target_addr == 0) {
3225         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3226     }
3227 
3228     /* Linux returns EINVAL if the addrlen pointer is invalid. */
3229     if (get_user_u32(addrlen, target_addrlen_addr))
3230         return -TARGET_EINVAL;
3231 
3232     if ((int)addrlen < 0) {
3233         return -TARGET_EINVAL;
3234     }
3235 
3236     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3237         return -TARGET_EINVAL;
3238 
3239     addr = alloca(addrlen);
3240 
3241     ret_addrlen = addrlen;
3242     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3243     if (!is_error(ret)) {
3244         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3245         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3246             ret = -TARGET_EFAULT;
3247         }
3248     }
3249     return ret;
3250 }
3251 
3252 /* do_getpeername() must return target values and target errnos. */
3253 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3254                                abi_ulong target_addrlen_addr)
3255 {
3256     socklen_t addrlen, ret_addrlen;
3257     void *addr;
3258     abi_long ret;
3259 
3260     if (get_user_u32(addrlen, target_addrlen_addr))
3261         return -TARGET_EFAULT;
3262 
3263     if ((int)addrlen < 0) {
3264         return -TARGET_EINVAL;
3265     }
3266 
3267     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3268         return -TARGET_EFAULT;
3269 
3270     addr = alloca(addrlen);
3271 
3272     ret_addrlen = addrlen;
3273     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3274     if (!is_error(ret)) {
3275         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3276         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3277             ret = -TARGET_EFAULT;
3278         }
3279     }
3280     return ret;
3281 }
3282 
3283 /* do_getsockname() must return target values and target errnos. */
3284 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3285                                abi_ulong target_addrlen_addr)
3286 {
3287     socklen_t addrlen, ret_addrlen;
3288     void *addr;
3289     abi_long ret;
3290 
3291     if (get_user_u32(addrlen, target_addrlen_addr))
3292         return -TARGET_EFAULT;
3293 
3294     if ((int)addrlen < 0) {
3295         return -TARGET_EINVAL;
3296     }
3297 
3298     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3299         return -TARGET_EFAULT;
3300 
3301     addr = alloca(addrlen);
3302 
3303     ret_addrlen = addrlen;
3304     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3305     if (!is_error(ret)) {
3306         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3307         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3308             ret = -TARGET_EFAULT;
3309         }
3310     }
3311     return ret;
3312 }
3313 
3314 /* do_socketpair() must return target values and target errnos. */
3315 static abi_long do_socketpair(int domain, int type, int protocol,
3316                               abi_ulong target_tab_addr)
3317 {
3318     int tab[2];
3319     abi_long ret;
3320 
3321     target_to_host_sock_type(&type);
3322 
3323     ret = get_errno(socketpair(domain, type, protocol, tab));
3324     if (!is_error(ret)) {
3325         if (put_user_s32(tab[0], target_tab_addr)
3326             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3327             ret = -TARGET_EFAULT;
3328     }
3329     return ret;
3330 }
3331 
3332 /* do_sendto() must return target values and target errnos. */
3333 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3334                           abi_ulong target_addr, socklen_t addrlen)
3335 {
3336     void *addr;
3337     void *host_msg;
3338     void *copy_msg = NULL;
3339     abi_long ret;
3340 
3341     if ((int)addrlen < 0) {
3342         return -TARGET_EINVAL;
3343     }
3344 
3345     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3346     if (!host_msg)
3347         return -TARGET_EFAULT;
3348     if (fd_trans_target_to_host_data(fd)) {
3349         copy_msg = host_msg;
3350         host_msg = g_malloc(len);
3351         memcpy(host_msg, copy_msg, len);
3352         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3353         if (ret < 0) {
3354             goto fail;
3355         }
3356     }
3357     if (target_addr) {
3358         addr = alloca(addrlen+1);
3359         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3360         if (ret) {
3361             goto fail;
3362         }
3363         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3364     } else {
3365         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3366     }
3367 fail:
3368     if (copy_msg) {
3369         g_free(host_msg);
3370         host_msg = copy_msg;
3371     }
3372     unlock_user(host_msg, msg, 0);
3373     return ret;
3374 }
3375 
3376 /* do_recvfrom() must return target values and target errnos. */
3377 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3378                             abi_ulong target_addr,
3379                             abi_ulong target_addrlen)
3380 {
3381     socklen_t addrlen, ret_addrlen;
3382     void *addr;
3383     void *host_msg;
3384     abi_long ret;
3385 
3386     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3387     if (!host_msg)
3388         return -TARGET_EFAULT;
3389     if (target_addr) {
3390         if (get_user_u32(addrlen, target_addrlen)) {
3391             ret = -TARGET_EFAULT;
3392             goto fail;
3393         }
3394         if ((int)addrlen < 0) {
3395             ret = -TARGET_EINVAL;
3396             goto fail;
3397         }
3398         addr = alloca(addrlen);
3399         ret_addrlen = addrlen;
3400         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3401                                       addr, &ret_addrlen));
3402     } else {
3403         addr = NULL; /* To keep compiler quiet.  */
3404         addrlen = 0; /* To keep compiler quiet.  */
3405         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3406     }
3407     if (!is_error(ret)) {
3408         if (fd_trans_host_to_target_data(fd)) {
3409             abi_long trans;
3410             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3411             if (is_error(trans)) {
3412                 ret = trans;
3413                 goto fail;
3414             }
3415         }
3416         if (target_addr) {
3417             host_to_target_sockaddr(target_addr, addr,
3418                                     MIN(addrlen, ret_addrlen));
3419             if (put_user_u32(ret_addrlen, target_addrlen)) {
3420                 ret = -TARGET_EFAULT;
3421                 goto fail;
3422             }
3423         }
3424         unlock_user(host_msg, msg, len);
3425     } else {
3426 fail:
3427         unlock_user(host_msg, msg, 0);
3428     }
3429     return ret;
3430 }
3431 
3432 #ifdef TARGET_NR_socketcall
3433 /* do_socketcall() must return target values and target errnos. */
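/*
 * The guest passes the operation number in 'num' and a pointer 'vptr' to an
 * array of abi_long arguments; nargs[] gives the argument count for each
 * operation.  Illustrative example: a guest connect(fd, addr, addrlen) issued
 * through socketcall arrives as num == TARGET_SYS_CONNECT with vptr pointing
 * at three abi_longs { fd, addr, addrlen }, which are fetched below with
 * get_user_ual() and forwarded to do_connect().
 */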
3434 static abi_long do_socketcall(int num, abi_ulong vptr)
3435 {
3436     static const unsigned nargs[] = { /* number of arguments per operation */
3437         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3438         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3439         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3440         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3441         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3442         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3443         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3444         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3445         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3446         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3447         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3448         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3449         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3450         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3451         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3452         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3453         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3454         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3455         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3456         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3457     };
3458     abi_long a[6]; /* max 6 args */
3459     unsigned i;
3460 
3461     /* check the range of the first argument num */
3462     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3463     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3464         return -TARGET_EINVAL;
3465     }
3466     /* ensure we have space for args */
3467     if (nargs[num] > ARRAY_SIZE(a)) {
3468         return -TARGET_EINVAL;
3469     }
3470     /* collect the arguments in a[] according to nargs[] */
3471     for (i = 0; i < nargs[num]; ++i) {
3472         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3473             return -TARGET_EFAULT;
3474         }
3475     }
3476     /* now that we have the args, invoke the appropriate underlying function */
3477     switch (num) {
3478     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3479         return do_socket(a[0], a[1], a[2]);
3480     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3481         return do_bind(a[0], a[1], a[2]);
3482     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3483         return do_connect(a[0], a[1], a[2]);
3484     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3485         return get_errno(listen(a[0], a[1]));
3486     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3487         return do_accept4(a[0], a[1], a[2], 0);
3488     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3489         return do_getsockname(a[0], a[1], a[2]);
3490     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3491         return do_getpeername(a[0], a[1], a[2]);
3492     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3493         return do_socketpair(a[0], a[1], a[2], a[3]);
3494     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3495         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3496     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3497         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3498     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3499         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3500     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3501         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3502     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3503         return get_errno(shutdown(a[0], a[1]));
3504     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3505         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3506     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3507         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3508     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3509         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3510     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3511         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3512     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3513         return do_accept4(a[0], a[1], a[2], a[3]);
3514     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3515         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3516     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3517         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3518     default:
3519         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3520         return -TARGET_EINVAL;
3521     }
3522 }
3523 #endif
3524 
3525 #define N_SHM_REGIONS	32
3526 
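/*
 * Book-keeping for guest shmat() mappings: do_shmat() records the guest
 * address and size of each attached segment here so that do_shmdt() can
 * clear the corresponding page flags again.  The table is fixed-size, so at
 * most N_SHM_REGIONS segments can be tracked at any one time.
 */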
3527 static struct shm_region {
3528     abi_ulong start;
3529     abi_ulong size;
3530     bool in_use;
3531 } shm_regions[N_SHM_REGIONS];
3532 
3533 #ifndef TARGET_SEMID64_DS
3534 /* asm-generic version of this struct */
3535 struct target_semid64_ds
3536 {
3537   struct target_ipc_perm sem_perm;
3538   abi_ulong sem_otime;
3539 #if TARGET_ABI_BITS == 32
3540   abi_ulong __unused1;
3541 #endif
3542   abi_ulong sem_ctime;
3543 #if TARGET_ABI_BITS == 32
3544   abi_ulong __unused2;
3545 #endif
3546   abi_ulong sem_nsems;
3547   abi_ulong __unused3;
3548   abi_ulong __unused4;
3549 };
3550 #endif
3551 
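/*
 * The helpers below convert struct ipc_perm between target and host layouts
 * field by field.  The 'mode' and '__seq' members are swapped as 16-bit or
 * 32-bit quantities depending on the target (see the #ifdefs), reflecting
 * their differing widths in the respective target ABIs.
 */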
3552 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3553                                                abi_ulong target_addr)
3554 {
3555     struct target_ipc_perm *target_ip;
3556     struct target_semid64_ds *target_sd;
3557 
3558     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3559         return -TARGET_EFAULT;
3560     target_ip = &(target_sd->sem_perm);
3561     host_ip->__key = tswap32(target_ip->__key);
3562     host_ip->uid = tswap32(target_ip->uid);
3563     host_ip->gid = tswap32(target_ip->gid);
3564     host_ip->cuid = tswap32(target_ip->cuid);
3565     host_ip->cgid = tswap32(target_ip->cgid);
3566 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3567     host_ip->mode = tswap32(target_ip->mode);
3568 #else
3569     host_ip->mode = tswap16(target_ip->mode);
3570 #endif
3571 #if defined(TARGET_PPC)
3572     host_ip->__seq = tswap32(target_ip->__seq);
3573 #else
3574     host_ip->__seq = tswap16(target_ip->__seq);
3575 #endif
3576     unlock_user_struct(target_sd, target_addr, 0);
3577     return 0;
3578 }
3579 
3580 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3581                                                struct ipc_perm *host_ip)
3582 {
3583     struct target_ipc_perm *target_ip;
3584     struct target_semid64_ds *target_sd;
3585 
3586     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3587         return -TARGET_EFAULT;
3588     target_ip = &(target_sd->sem_perm);
3589     target_ip->__key = tswap32(host_ip->__key);
3590     target_ip->uid = tswap32(host_ip->uid);
3591     target_ip->gid = tswap32(host_ip->gid);
3592     target_ip->cuid = tswap32(host_ip->cuid);
3593     target_ip->cgid = tswap32(host_ip->cgid);
3594 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3595     target_ip->mode = tswap32(host_ip->mode);
3596 #else
3597     target_ip->mode = tswap16(host_ip->mode);
3598 #endif
3599 #if defined(TARGET_PPC)
3600     target_ip->__seq = tswap32(host_ip->__seq);
3601 #else
3602     target_ip->__seq = tswap16(host_ip->__seq);
3603 #endif
3604     unlock_user_struct(target_sd, target_addr, 1);
3605     return 0;
3606 }
3607 
3608 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3609                                                abi_ulong target_addr)
3610 {
3611     struct target_semid64_ds *target_sd;
3612 
3613     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3614         return -TARGET_EFAULT;
3615     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3616         return -TARGET_EFAULT;
3617     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3618     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3619     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3620     unlock_user_struct(target_sd, target_addr, 0);
3621     return 0;
3622 }
3623 
3624 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3625                                                struct semid_ds *host_sd)
3626 {
3627     struct target_semid64_ds *target_sd;
3628 
3629     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3630         return -TARGET_EFAULT;
3631     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3632         return -TARGET_EFAULT;
3633     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3634     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3635     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3636     unlock_user_struct(target_sd, target_addr, 1);
3637     return 0;
3638 }
3639 
3640 struct target_seminfo {
3641     int semmap;
3642     int semmni;
3643     int semmns;
3644     int semmnu;
3645     int semmsl;
3646     int semopm;
3647     int semume;
3648     int semusz;
3649     int semvmx;
3650     int semaem;
3651 };
3652 
3653 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3654                                               struct seminfo *host_seminfo)
3655 {
3656     struct target_seminfo *target_seminfo;
3657     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3658         return -TARGET_EFAULT;
3659     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3660     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3661     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3662     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3663     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3664     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3665     __put_user(host_seminfo->semume, &target_seminfo->semume);
3666     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3667     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3668     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3669     unlock_user_struct(target_seminfo, target_addr, 1);
3670     return 0;
3671 }
3672 
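/*
 * semctl() takes its final argument by value as a union.  The host-side
 * 'union semun' below is the conventional caller-supplied definition, while
 * 'union target_semun' holds the guest's view of it, in which the buf/array
 * members are guest addresses (abi_ulong) that must be translated before use.
 */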
3673 union semun {
3674 	int val;
3675 	struct semid_ds *buf;
3676 	unsigned short *array;
3677 	struct seminfo *__buf;
3678 };
3679 
3680 union target_semun {
3681 	int val;
3682 	abi_ulong buf;
3683 	abi_ulong array;
3684 	abi_ulong __buf;
3685 };
3686 
3687 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3688                                                abi_ulong target_addr)
3689 {
3690     int nsems;
3691     unsigned short *array;
3692     union semun semun;
3693     struct semid_ds semid_ds;
3694     int i, ret;
3695 
3696     semun.buf = &semid_ds;
3697 
3698     ret = semctl(semid, 0, IPC_STAT, semun);
3699     if (ret == -1)
3700         return get_errno(ret);
3701 
3702     nsems = semid_ds.sem_nsems;
3703 
3704     *host_array = g_try_new(unsigned short, nsems);
3705     if (!*host_array) {
3706         return -TARGET_ENOMEM;
3707     }
3708     array = lock_user(VERIFY_READ, target_addr,
3709                       nsems*sizeof(unsigned short), 1);
3710     if (!array) {
3711         g_free(*host_array);
3712         return -TARGET_EFAULT;
3713     }
3714 
3715     for(i=0; i<nsems; i++) {
3716         __get_user((*host_array)[i], &array[i]);
3717     }
3718     unlock_user(array, target_addr, 0);
3719 
3720     return 0;
3721 }
3722 
3723 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3724                                                unsigned short **host_array)
3725 {
3726     int nsems;
3727     unsigned short *array;
3728     union semun semun;
3729     struct semid_ds semid_ds;
3730     int i, ret;
3731 
3732     semun.buf = &semid_ds;
3733 
3734     ret = semctl(semid, 0, IPC_STAT, semun);
3735     if (ret == -1)
3736         return get_errno(ret);
3737 
3738     nsems = semid_ds.sem_nsems;
3739 
3740     array = lock_user(VERIFY_WRITE, target_addr,
3741                       nsems*sizeof(unsigned short), 0);
3742     if (!array)
3743         return -TARGET_EFAULT;
3744 
3745     for(i=0; i<nsems; i++) {
3746         __put_user((*host_array)[i], &array[i]);
3747     }
3748     g_free(*host_array);
3749     unlock_user(array, target_addr, 1);
3750 
3751     return 0;
3752 }
3753 
3754 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3755                                  abi_ulong target_arg)
3756 {
3757     union target_semun target_su = { .buf = target_arg };
3758     union semun arg;
3759     struct semid_ds dsarg;
3760     unsigned short *array = NULL;
3761     struct seminfo seminfo;
3762     abi_long ret = -TARGET_EINVAL;
3763     abi_long err;
3764     cmd &= 0xff;
3765 
3766     switch( cmd ) {
3767 	case GETVAL:
3768 	case SETVAL:
3769             /* In 64 bit cross-endian situations, we will erroneously pick up
3770              * the wrong half of the union for the "val" element.  To rectify
3771              * this, the entire 8-byte structure is byteswapped, followed by
3772              * a swap of the 4 byte val field. In other cases, the data is
3773              * already in proper host byte order. */
3774 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3775 		target_su.buf = tswapal(target_su.buf);
3776 		arg.val = tswap32(target_su.val);
3777 	    } else {
3778 		arg.val = target_su.val;
3779 	    }
3780             ret = get_errno(semctl(semid, semnum, cmd, arg));
3781             break;
3782 	case GETALL:
3783 	case SETALL:
3784             err = target_to_host_semarray(semid, &array, target_su.array);
3785             if (err)
3786                 return err;
3787             arg.array = array;
3788             ret = get_errno(semctl(semid, semnum, cmd, arg));
3789             err = host_to_target_semarray(semid, target_su.array, &array);
3790             if (err)
3791                 return err;
3792             break;
3793 	case IPC_STAT:
3794 	case IPC_SET:
3795 	case SEM_STAT:
3796             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3797             if (err)
3798                 return err;
3799             arg.buf = &dsarg;
3800             ret = get_errno(semctl(semid, semnum, cmd, arg));
3801             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3802             if (err)
3803                 return err;
3804             break;
3805 	case IPC_INFO:
3806 	case SEM_INFO:
3807             arg.__buf = &seminfo;
3808             ret = get_errno(semctl(semid, semnum, cmd, arg));
3809             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3810             if (err)
3811                 return err;
3812             break;
3813 	case IPC_RMID:
3814 	case GETPID:
3815 	case GETNCNT:
3816 	case GETZCNT:
3817             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3818             break;
3819     }
3820 
3821     return ret;
3822 }
3823 
3824 struct target_sembuf {
3825     unsigned short sem_num;
3826     short sem_op;
3827     short sem_flg;
3828 };
3829 
3830 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3831                                              abi_ulong target_addr,
3832                                              unsigned nsops)
3833 {
3834     struct target_sembuf *target_sembuf;
3835     int i;
3836 
3837     target_sembuf = lock_user(VERIFY_READ, target_addr,
3838                               nsops*sizeof(struct target_sembuf), 1);
3839     if (!target_sembuf)
3840         return -TARGET_EFAULT;
3841 
3842     for(i=0; i<nsops; i++) {
3843         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3844         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3845         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3846     }
3847 
3848     unlock_user(target_sembuf, target_addr, 0);
3849 
3850     return 0;
3851 }
3852 
3853 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3854     defined(TARGET_NR_semtimedop)
3855 
3856 /*
3857  * This macro is required to handle the s390 variant, which passes the
3858  * arguments in a different order than the default.
3859  */
3860 #ifdef __s390x__
3861 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3862   (__nsops), (__timeout), (__sops)
3863 #else
3864 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3865   (__nsops), 0, (__sops), (__timeout)
3866 #endif
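/*
 * For illustration only, with the macro above the sys_ipc fallback in
 * do_semtimedop() expands to roughly:
 *
 *   generic: safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, timeout)
 *   s390x:   safe_ipc(IPCOP_semtimedop, semid, nsops, timeout, sops)
 *
 * where the 0 fills the argument slot that the generic ABI leaves unused.
 */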
3867 
3868 static inline abi_long do_semtimedop(int semid,
3869                                      abi_long ptr,
3870                                      unsigned nsops,
3871                                      abi_long timeout)
3872 {
3873     struct sembuf *sops;
3874     struct timespec ts, *pts = NULL;
3875     abi_long ret;
3876 
3877     if (timeout) {
3878         pts = &ts;
3879         if (target_to_host_timespec(pts, timeout)) {
3880             return -TARGET_EFAULT;
3881         }
3882     }
3883 
3884     if (nsops > TARGET_SEMOPM) {
3885         return -TARGET_E2BIG;
3886     }
3887 
3888     sops = g_new(struct sembuf, nsops);
3889 
3890     if (target_to_host_sembuf(sops, ptr, nsops)) {
3891         g_free(sops);
3892         return -TARGET_EFAULT;
3893     }
3894 
3895     ret = -TARGET_ENOSYS;
3896 #ifdef __NR_semtimedop
3897     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3898 #endif
3899 #ifdef __NR_ipc
3900     if (ret == -TARGET_ENOSYS) {
3901         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3902                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3903     }
3904 #endif
3905     g_free(sops);
3906     return ret;
3907 }
3908 #endif
3909 
3910 struct target_msqid_ds
3911 {
3912     struct target_ipc_perm msg_perm;
3913     abi_ulong msg_stime;
3914 #if TARGET_ABI_BITS == 32
3915     abi_ulong __unused1;
3916 #endif
3917     abi_ulong msg_rtime;
3918 #if TARGET_ABI_BITS == 32
3919     abi_ulong __unused2;
3920 #endif
3921     abi_ulong msg_ctime;
3922 #if TARGET_ABI_BITS == 32
3923     abi_ulong __unused3;
3924 #endif
3925     abi_ulong __msg_cbytes;
3926     abi_ulong msg_qnum;
3927     abi_ulong msg_qbytes;
3928     abi_ulong msg_lspid;
3929     abi_ulong msg_lrpid;
3930     abi_ulong __unused4;
3931     abi_ulong __unused5;
3932 };
3933 
3934 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3935                                                abi_ulong target_addr)
3936 {
3937     struct target_msqid_ds *target_md;
3938 
3939     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3940         return -TARGET_EFAULT;
3941     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3942         return -TARGET_EFAULT;
3943     host_md->msg_stime = tswapal(target_md->msg_stime);
3944     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3945     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3946     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3947     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3948     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3949     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3950     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3951     unlock_user_struct(target_md, target_addr, 0);
3952     return 0;
3953 }
3954 
3955 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3956                                                struct msqid_ds *host_md)
3957 {
3958     struct target_msqid_ds *target_md;
3959 
3960     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3961         return -TARGET_EFAULT;
3962     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3963         return -TARGET_EFAULT;
3964     target_md->msg_stime = tswapal(host_md->msg_stime);
3965     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3966     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3967     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3968     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3969     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3970     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3971     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3972     unlock_user_struct(target_md, target_addr, 1);
3973     return 0;
3974 }
3975 
3976 struct target_msginfo {
3977     int msgpool;
3978     int msgmap;
3979     int msgmax;
3980     int msgmnb;
3981     int msgmni;
3982     int msgssz;
3983     int msgtql;
3984     unsigned short int msgseg;
3985 };
3986 
3987 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3988                                               struct msginfo *host_msginfo)
3989 {
3990     struct target_msginfo *target_msginfo;
3991     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3992         return -TARGET_EFAULT;
3993     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3994     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3995     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3996     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3997     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3998     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3999     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4000     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4001     unlock_user_struct(target_msginfo, target_addr, 1);
4002     return 0;
4003 }
4004 
4005 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4006 {
4007     struct msqid_ds dsarg;
4008     struct msginfo msginfo;
4009     abi_long ret = -TARGET_EINVAL;
4010 
4011     cmd &= 0xff;
4012 
4013     switch (cmd) {
4014     case IPC_STAT:
4015     case IPC_SET:
4016     case MSG_STAT:
4017         if (target_to_host_msqid_ds(&dsarg,ptr))
4018             return -TARGET_EFAULT;
4019         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4020         if (host_to_target_msqid_ds(ptr,&dsarg))
4021             return -TARGET_EFAULT;
4022         break;
4023     case IPC_RMID:
4024         ret = get_errno(msgctl(msgid, cmd, NULL));
4025         break;
4026     case IPC_INFO:
4027     case MSG_INFO:
4028         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4029         if (host_to_target_msginfo(ptr, &msginfo))
4030             return -TARGET_EFAULT;
4031         break;
4032     }
4033 
4034     return ret;
4035 }
4036 
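/*
 * The guest message buffer cannot be handed to the host msgsnd()/msgrcv()
 * directly: the leading mtype field is abi_long wide on the target but
 * 'long' wide on the host, so the offset of mtext[] can differ and mtype may
 * need byte swapping.  do_msgsnd() and do_msgrcv() therefore bounce the
 * message through a temporary host buffer.
 */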
4037 struct target_msgbuf {
4038     abi_long mtype;
4039     char	mtext[1];
4040 };
4041 
4042 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4043                                  ssize_t msgsz, int msgflg)
4044 {
4045     struct target_msgbuf *target_mb;
4046     struct msgbuf *host_mb;
4047     abi_long ret = 0;
4048 
4049     if (msgsz < 0) {
4050         return -TARGET_EINVAL;
4051     }
4052 
4053     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4054         return -TARGET_EFAULT;
4055     host_mb = g_try_malloc(msgsz + sizeof(long));
4056     if (!host_mb) {
4057         unlock_user_struct(target_mb, msgp, 0);
4058         return -TARGET_ENOMEM;
4059     }
4060     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4061     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4062     ret = -TARGET_ENOSYS;
4063 #ifdef __NR_msgsnd
4064     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4065 #endif
4066 #ifdef __NR_ipc
4067     if (ret == -TARGET_ENOSYS) {
4068 #ifdef __s390x__
4069         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4070                                  host_mb));
4071 #else
4072         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4073                                  host_mb, 0));
4074 #endif
4075     }
4076 #endif
4077     g_free(host_mb);
4078     unlock_user_struct(target_mb, msgp, 0);
4079 
4080     return ret;
4081 }
4082 
4083 #ifdef __NR_ipc
4084 #if defined(__sparc__)
4085 /* For msgrcv, SPARC does not use the kludge on the final 2 arguments.  */
4086 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4087 #elif defined(__s390x__)
4088 /* The s390 sys_ipc variant has only five parameters.  */
4089 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4090     ((long int[]){(long int)__msgp, __msgtyp})
4091 #else
4092 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4093     ((long int[]){(long int)__msgp, __msgtyp}), 0
4094 #endif
4095 #endif
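/*
 * Sketch of how the msgrcv arguments reach sys_ipc (derived from the macros
 * above): the generic ABI wants a pointer to a two-element long array
 * { msgp, msgtyp } plus a trailing 0, s390x omits the trailing argument, and
 * SPARC simply passes msgp and msgtyp as separate arguments.
 */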
4096 
4097 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4098                                  ssize_t msgsz, abi_long msgtyp,
4099                                  int msgflg)
4100 {
4101     struct target_msgbuf *target_mb;
4102     char *target_mtext;
4103     struct msgbuf *host_mb;
4104     abi_long ret = 0;
4105 
4106     if (msgsz < 0) {
4107         return -TARGET_EINVAL;
4108     }
4109 
4110     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4111         return -TARGET_EFAULT;
4112 
4113     host_mb = g_try_malloc(msgsz + sizeof(long));
4114     if (!host_mb) {
4115         ret = -TARGET_ENOMEM;
4116         goto end;
4117     }
4118     ret = -TARGET_ENOSYS;
4119 #ifdef __NR_msgrcv
4120     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4121 #endif
4122 #ifdef __NR_ipc
4123     if (ret == -TARGET_ENOSYS) {
4124         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4125                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4126     }
4127 #endif
4128 
4129     if (ret > 0) {
4130         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4131         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4132         if (!target_mtext) {
4133             ret = -TARGET_EFAULT;
4134             goto end;
4135         }
4136         memcpy(target_mb->mtext, host_mb->mtext, ret);
4137         unlock_user(target_mtext, target_mtext_addr, ret);
4138     }
4139 
4140     target_mb->mtype = tswapal(host_mb->mtype);
4141 
4142 end:
4143     if (target_mb)
4144         unlock_user_struct(target_mb, msgp, 1);
4145     g_free(host_mb);
4146     return ret;
4147 }
4148 
4149 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4150                                                abi_ulong target_addr)
4151 {
4152     struct target_shmid_ds *target_sd;
4153 
4154     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4155         return -TARGET_EFAULT;
4156     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4157         return -TARGET_EFAULT;
4158     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4159     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4160     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4161     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4162     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4163     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4164     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4165     unlock_user_struct(target_sd, target_addr, 0);
4166     return 0;
4167 }
4168 
4169 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4170                                                struct shmid_ds *host_sd)
4171 {
4172     struct target_shmid_ds *target_sd;
4173 
4174     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4175         return -TARGET_EFAULT;
4176     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4177         return -TARGET_EFAULT;
4178     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4179     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4180     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4181     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4182     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4183     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4184     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4185     unlock_user_struct(target_sd, target_addr, 1);
4186     return 0;
4187 }
4188 
4189 struct  target_shminfo {
4190     abi_ulong shmmax;
4191     abi_ulong shmmin;
4192     abi_ulong shmmni;
4193     abi_ulong shmseg;
4194     abi_ulong shmall;
4195 };
4196 
4197 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4198                                               struct shminfo *host_shminfo)
4199 {
4200     struct target_shminfo *target_shminfo;
4201     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4202         return -TARGET_EFAULT;
4203     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4204     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4205     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4206     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4207     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4208     unlock_user_struct(target_shminfo, target_addr, 1);
4209     return 0;
4210 }
4211 
4212 struct target_shm_info {
4213     int used_ids;
4214     abi_ulong shm_tot;
4215     abi_ulong shm_rss;
4216     abi_ulong shm_swp;
4217     abi_ulong swap_attempts;
4218     abi_ulong swap_successes;
4219 };
4220 
4221 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4222                                                struct shm_info *host_shm_info)
4223 {
4224     struct target_shm_info *target_shm_info;
4225     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4226         return -TARGET_EFAULT;
4227     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4228     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4229     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4230     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4231     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4232     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4233     unlock_user_struct(target_shm_info, target_addr, 1);
4234     return 0;
4235 }
4236 
4237 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4238 {
4239     struct shmid_ds dsarg;
4240     struct shminfo shminfo;
4241     struct shm_info shm_info;
4242     abi_long ret = -TARGET_EINVAL;
4243 
4244     cmd &= 0xff;
4245 
4246     switch(cmd) {
4247     case IPC_STAT:
4248     case IPC_SET:
4249     case SHM_STAT:
4250         if (target_to_host_shmid_ds(&dsarg, buf))
4251             return -TARGET_EFAULT;
4252         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4253         if (host_to_target_shmid_ds(buf, &dsarg))
4254             return -TARGET_EFAULT;
4255         break;
4256     case IPC_INFO:
4257         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4258         if (host_to_target_shminfo(buf, &shminfo))
4259             return -TARGET_EFAULT;
4260         break;
4261     case SHM_INFO:
4262         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4263         if (host_to_target_shm_info(buf, &shm_info))
4264             return -TARGET_EFAULT;
4265         break;
4266     case IPC_RMID:
4267     case SHM_LOCK:
4268     case SHM_UNLOCK:
4269         ret = get_errno(shmctl(shmid, cmd, NULL));
4270         break;
4271     }
4272 
4273     return ret;
4274 }
4275 
4276 #ifndef TARGET_FORCE_SHMLBA
4277 /* For most architectures, SHMLBA is the same as the page size;
4278  * some architectures have larger values, in which case they should
4279  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4280  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4281  * and defining its own value for SHMLBA.
4282  *
4283  * The kernel also permits SHMLBA to be set by the architecture to a
4284  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4285  * this means that addresses are rounded to the large size if
4286  * SHM_RND is set but addresses not aligned to that size are not rejected
4287  * as long as they are at least page-aligned. Since the only architecture
4288  * which uses this is ia64 this code doesn't provide for that oddity.
4289  */
4290 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4291 {
4292     return TARGET_PAGE_SIZE;
4293 }
4294 #endif
4295 
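/*
 * do_shmat() attaches the segment on the host and then mirrors the mapping
 * in guest terms: the guest address must respect the target SHMLBA (rounded
 * down only if SHM_RND was given), the resulting range is marked valid and
 * readable (and writable unless SHM_RDONLY) in the page flags, and the
 * attachment is recorded in shm_regions[] so do_shmdt() can undo the
 * book-keeping later.
 */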
4296 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4297                                  int shmid, abi_ulong shmaddr, int shmflg)
4298 {
4299     abi_long raddr;
4300     void *host_raddr;
4301     struct shmid_ds shm_info;
4302     int i, ret;
4303     abi_ulong shmlba;
4304 
4305     /* find out the length of the shared memory segment */
4306     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4307     if (is_error(ret)) {
4308         /* can't get length, bail out */
4309         return ret;
4310     }
4311 
4312     shmlba = target_shmlba(cpu_env);
4313 
4314     if (shmaddr & (shmlba - 1)) {
4315         if (shmflg & SHM_RND) {
4316             shmaddr &= ~(shmlba - 1);
4317         } else {
4318             return -TARGET_EINVAL;
4319         }
4320     }
4321     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4322         return -TARGET_EINVAL;
4323     }
4324 
4325     mmap_lock();
4326 
4327     if (shmaddr)
4328         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4329     else {
4330         abi_ulong mmap_start;
4331 
4332         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4333         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4334 
4335         if (mmap_start == -1) {
4336             errno = ENOMEM;
4337             host_raddr = (void *)-1;
4338         } else
4339             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4340     }
4341 
4342     if (host_raddr == (void *)-1) {
4343         mmap_unlock();
4344         return get_errno((long)host_raddr);
4345     }
4346     raddr = h2g((unsigned long)host_raddr);
4347 
4348     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4349                    PAGE_VALID | PAGE_READ |
4350                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4351 
4352     for (i = 0; i < N_SHM_REGIONS; i++) {
4353         if (!shm_regions[i].in_use) {
4354             shm_regions[i].in_use = true;
4355             shm_regions[i].start = raddr;
4356             shm_regions[i].size = shm_info.shm_segsz;
4357             break;
4358         }
4359     }
4360 
4361     mmap_unlock();
4362     return raddr;
4363 
4364 }
4365 
4366 static inline abi_long do_shmdt(abi_ulong shmaddr)
4367 {
4368     int i;
4369     abi_long rv;
4370 
4371     mmap_lock();
4372 
4373     for (i = 0; i < N_SHM_REGIONS; ++i) {
4374         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4375             shm_regions[i].in_use = false;
4376             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4377             break;
4378         }
4379     }
4380     rv = get_errno(shmdt(g2h(shmaddr)));
4381 
4382     mmap_unlock();
4383 
4384     return rv;
4385 }
4386 
4387 #ifdef TARGET_NR_ipc
4388 /* ??? This only works with linear mappings.  */
4389 /* do_ipc() must return target values and target errnos. */
4390 static abi_long do_ipc(CPUArchState *cpu_env,
4391                        unsigned int call, abi_long first,
4392                        abi_long second, abi_long third,
4393                        abi_long ptr, abi_long fifth)
4394 {
4395     int version;
4396     abi_long ret = 0;
4397 
4398     version = call >> 16;
4399     call &= 0xffff;
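    /*
     * Worked example of this encoding (added note): a version-0 msgrcv
     * arrives as call == IPCOP_msgrcv with first == msqid, second == msgsz,
     * third == msgflg and ptr pointing at a small struct bundling
     * { msgp, msgtyp }; callers using a non-zero version instead pass msgp
     * in ptr and msgtyp in fifth, as the IPCOP_msgrcv case below shows.
     */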
4400 
4401     switch (call) {
4402     case IPCOP_semop:
4403         ret = do_semtimedop(first, ptr, second, 0);
4404         break;
4405     case IPCOP_semtimedop:
4406     /*
4407      * The s390 sys_ipc variant has only five parameters instead of six
4408      * (as in the default variant), and the only difference is the handling of
4409      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4410      * to a struct timespec while the generic variant uses the fifth parameter.
4411      */
4412 #if defined(TARGET_S390X)
4413         ret = do_semtimedop(first, ptr, second, third);
4414 #else
4415         ret = do_semtimedop(first, ptr, second, fifth);
4416 #endif
4417         break;
4418 
4419     case IPCOP_semget:
4420         ret = get_errno(semget(first, second, third));
4421         break;
4422 
4423     case IPCOP_semctl: {
4424         /* The semun argument to semctl is passed by value, so dereference the
4425          * ptr argument. */
4426         abi_ulong atptr;
4427         get_user_ual(atptr, ptr);
4428         ret = do_semctl(first, second, third, atptr);
4429         break;
4430     }
4431 
4432     case IPCOP_msgget:
4433         ret = get_errno(msgget(first, second));
4434         break;
4435 
4436     case IPCOP_msgsnd:
4437         ret = do_msgsnd(first, ptr, second, third);
4438         break;
4439 
4440     case IPCOP_msgctl:
4441         ret = do_msgctl(first, second, ptr);
4442         break;
4443 
4444     case IPCOP_msgrcv:
4445         switch (version) {
4446         case 0:
4447             {
4448                 struct target_ipc_kludge {
4449                     abi_long msgp;
4450                     abi_long msgtyp;
4451                 } *tmp;
4452 
4453                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4454                     ret = -TARGET_EFAULT;
4455                     break;
4456                 }
4457 
4458                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4459 
4460                 unlock_user_struct(tmp, ptr, 0);
4461                 break;
4462             }
4463         default:
4464             ret = do_msgrcv(first, ptr, second, fifth, third);
4465         }
4466         break;
4467 
4468     case IPCOP_shmat:
4469         switch (version) {
4470         default:
4471         {
4472             abi_ulong raddr;
4473             raddr = do_shmat(cpu_env, first, ptr, second);
4474             if (is_error(raddr))
4475                 return get_errno(raddr);
4476             if (put_user_ual(raddr, third))
4477                 return -TARGET_EFAULT;
4478             break;
4479         }
4480         case 1:
4481             ret = -TARGET_EINVAL;
4482             break;
4483         }
4484         break;
4485     case IPCOP_shmdt:
4486         ret = do_shmdt(ptr);
4487         break;
4488 
4489     case IPCOP_shmget:
4490         /* IPC_* flag values are the same on all linux platforms */
4491         ret = get_errno(shmget(first, second, third));
4492         break;
4493 
4494         /* IPC_* and SHM_* command values are the same on all linux platforms */
4495     case IPCOP_shmctl:
4496         ret = do_shmctl(first, second, ptr);
4497         break;
4498     default:
4499         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4500                       call, version);
4501         ret = -TARGET_ENOSYS;
4502         break;
4503     }
4504     return ret;
4505 }
4506 #endif
4507 
4508 /* kernel structure types definitions */
4509 
4510 #define STRUCT(name, ...) STRUCT_ ## name,
4511 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4512 enum {
4513 #include "syscall_types.h"
4514 STRUCT_MAX
4515 };
4516 #undef STRUCT
4517 #undef STRUCT_SPECIAL
4518 
4519 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4520 #define STRUCT_SPECIAL(name)
4521 #include "syscall_types.h"
4522 #undef STRUCT
4523 #undef STRUCT_SPECIAL
4524 
4525 #define MAX_STRUCT_SIZE 4096
4526 
4527 #ifdef CONFIG_FIEMAP
4528 /* So fiemap access checks don't overflow on 32 bit systems.
4529  * This is very slightly smaller than the limit imposed by
4530  * the underlying kernel.
4531  */
4532 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4533                             / sizeof(struct fiemap_extent))
4534 
4535 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4536                                        int fd, int cmd, abi_long arg)
4537 {
4538     /* The parameter for this ioctl is a struct fiemap followed
4539      * by an array of struct fiemap_extent whose size is set
4540      * in fiemap->fm_extent_count. The array is filled in by the
4541      * ioctl.
4542      */
4543     int target_size_in, target_size_out;
4544     struct fiemap *fm;
4545     const argtype *arg_type = ie->arg_type;
4546     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4547     void *argptr, *p;
4548     abi_long ret;
4549     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4550     uint32_t outbufsz;
4551     int free_fm = 0;
4552 
4553     assert(arg_type[0] == TYPE_PTR);
4554     assert(ie->access == IOC_RW);
4555     arg_type++;
4556     target_size_in = thunk_type_size(arg_type, 0);
4557     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4558     if (!argptr) {
4559         return -TARGET_EFAULT;
4560     }
4561     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4562     unlock_user(argptr, arg, 0);
4563     fm = (struct fiemap *)buf_temp;
4564     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4565         return -TARGET_EINVAL;
4566     }
4567 
4568     outbufsz = sizeof (*fm) +
4569         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4570 
4571     if (outbufsz > MAX_STRUCT_SIZE) {
4572         /* We can't fit all the extents into the fixed size buffer.
4573          * Allocate one that is large enough and use it instead.
4574          */
4575         fm = g_try_malloc(outbufsz);
4576         if (!fm) {
4577             return -TARGET_ENOMEM;
4578         }
4579         memcpy(fm, buf_temp, sizeof(struct fiemap));
4580         free_fm = 1;
4581     }
4582     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4583     if (!is_error(ret)) {
4584         target_size_out = target_size_in;
4585         /* An extent_count of 0 means we were only counting the extents
4586          * so there are no structs to copy
4587          */
4588         if (fm->fm_extent_count != 0) {
4589             target_size_out += fm->fm_mapped_extents * extent_size;
4590         }
4591         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4592         if (!argptr) {
4593             ret = -TARGET_EFAULT;
4594         } else {
4595             /* Convert the struct fiemap */
4596             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4597             if (fm->fm_extent_count != 0) {
4598                 p = argptr + target_size_in;
4599                 /* ...and then all the struct fiemap_extents */
4600                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4601                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4602                                   THUNK_TARGET);
4603                     p += extent_size;
4604                 }
4605             }
4606             unlock_user(argptr, arg, target_size_out);
4607         }
4608     }
4609     if (free_fm) {
4610         g_free(fm);
4611     }
4612     return ret;
4613 }
4614 #endif
4615 
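/*
 * SIOCGIFCONF handling: struct ifreq may have a different size on the target
 * than on the host, so ifc_len (a byte count of ifreq entries) has to be
 * rescaled in both directions and the ifreq array copied entry by entry
 * through the thunk converter rather than as a flat byte buffer.
 */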
4616 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4617                                 int fd, int cmd, abi_long arg)
4618 {
4619     const argtype *arg_type = ie->arg_type;
4620     int target_size;
4621     void *argptr;
4622     int ret;
4623     struct ifconf *host_ifconf;
4624     uint32_t outbufsz;
4625     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4626     int target_ifreq_size;
4627     int nb_ifreq;
4628     int free_buf = 0;
4629     int i;
4630     int target_ifc_len;
4631     abi_long target_ifc_buf;
4632     int host_ifc_len;
4633     char *host_ifc_buf;
4634 
4635     assert(arg_type[0] == TYPE_PTR);
4636     assert(ie->access == IOC_RW);
4637 
4638     arg_type++;
4639     target_size = thunk_type_size(arg_type, 0);
4640 
4641     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4642     if (!argptr)
4643         return -TARGET_EFAULT;
4644     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4645     unlock_user(argptr, arg, 0);
4646 
4647     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4648     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4649     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4650 
4651     if (target_ifc_buf != 0) {
4652         target_ifc_len = host_ifconf->ifc_len;
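             /*
              * struct ifreq can differ in size between target and host, so
              * convert the buffer length in whole entries rather than
              * copying ifc_len across verbatim.
              */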
4653         nb_ifreq = target_ifc_len / target_ifreq_size;
4654         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4655 
4656         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4657         if (outbufsz > MAX_STRUCT_SIZE) {
4658             /*
4659              * We can't fit all the ifreq entries into the fixed size buffer.
4660              * Allocate one that is large enough and use it instead.
4661              */
4662             host_ifconf = malloc(outbufsz);
4663             if (!host_ifconf) {
4664                 return -TARGET_ENOMEM;
4665             }
4666             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4667             free_buf = 1;
4668         }
4669         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4670 
4671         host_ifconf->ifc_len = host_ifc_len;
4672     } else {
4673         host_ifc_buf = NULL;
4674     }
4675     host_ifconf->ifc_buf = host_ifc_buf;
4676 
4677     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4678     if (!is_error(ret)) {
4679         /* convert host ifc_len to target ifc_len */
4680 
4681         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4682         target_ifc_len = nb_ifreq * target_ifreq_size;
4683         host_ifconf->ifc_len = target_ifc_len;
4684 
4685         /* restore target ifc_buf */
4686 
4687         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4688 
4689         /* copy struct ifconf to target user */
4690 
4691         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4692         if (!argptr)
4693             return -TARGET_EFAULT;
4694         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4695         unlock_user(argptr, arg, target_size);
4696 
4697         if (target_ifc_buf != 0) {
4698             /* copy ifreq[] to target user */
4699             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
                 if (!argptr) {
                     if (free_buf) {
                         free(host_ifconf);
                     }
                     return -TARGET_EFAULT;
                 }
4700             for (i = 0; i < nb_ifreq; i++) {
4701                 thunk_convert(argptr + i * target_ifreq_size,
4702                               host_ifc_buf + i * sizeof(struct ifreq),
4703                               ifreq_arg_type, THUNK_TARGET);
4704             }
4705             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4706         }
4707     }
4708 
4709     if (free_buf) {
4710         free(host_ifconf);
4711     }
4712 
4713     return ret;
4714 }
4715 
4716 #if defined(CONFIG_USBFS)
4717 #if HOST_LONG_BITS > 64
4718 #error USBDEVFS thunks do not support >64 bit hosts yet.
4719 #endif
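     /*
      * Each outstanding URB is wrapped in a live_urb so that the guest URB
      * address and the locked guest buffer can be recovered at reap time.
      * target_urb_adr must remain the first member: the hash table below
      * uses g_int64_hash/g_int64_equal and passes the struct pointer as
      * the key, so lookups read the guest URB address through it.
      */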
4720 struct live_urb {
4721     uint64_t target_urb_adr;
4722     uint64_t target_buf_adr;
4723     char *target_buf_ptr;
4724     struct usbdevfs_urb host_urb;
4725 };
4726 
4727 static GHashTable *usbdevfs_urb_hashtable(void)
4728 {
4729     static GHashTable *urb_hashtable;
4730 
4731     if (!urb_hashtable) {
4732         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4733     }
4734     return urb_hashtable;
4735 }
4736 
4737 static void urb_hashtable_insert(struct live_urb *urb)
4738 {
4739     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4740     g_hash_table_insert(urb_hashtable, urb, urb);
4741 }
4742 
4743 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4744 {
4745     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4746     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4747 }
4748 
4749 static void urb_hashtable_remove(struct live_urb *urb)
4750 {
4751     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4752     g_hash_table_remove(urb_hashtable, urb);
4753 }
4754 
4755 static abi_long
4756 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4757                           int fd, int cmd, abi_long arg)
4758 {
4759     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4760     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4761     struct live_urb *lurb;
4762     void *argptr;
4763     uint64_t hurb;
4764     int target_size;
4765     uintptr_t target_urb_adr;
4766     abi_long ret;
4767 
4768     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4769 
4770     memset(buf_temp, 0, sizeof(uint64_t));
4771     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4772     if (is_error(ret)) {
4773         return ret;
4774     }
4775 
4776     memcpy(&hurb, buf_temp, sizeof(uint64_t));
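         /*
          * The kernel returns the host_urb pointer we submitted; step back
          * by its offset to recover the enclosing live_urb metadata
          * (container_of style).
          */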
4777     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4778     if (!lurb->target_urb_adr) {
4779         return -TARGET_EFAULT;
4780     }
4781     urb_hashtable_remove(lurb);
4782     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4783         lurb->host_urb.buffer_length);
4784     lurb->target_buf_ptr = NULL;
4785 
4786     /* restore the guest buffer pointer */
4787     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4788 
4789     /* update the guest urb struct */
4790     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4791     if (!argptr) {
4792         g_free(lurb);
4793         return -TARGET_EFAULT;
4794     }
4795     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4796     unlock_user(argptr, lurb->target_urb_adr, target_size);
4797 
4798     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4799     /* write back the urb handle */
4800     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4801     if (!argptr) {
4802         g_free(lurb);
4803         return -TARGET_EFAULT;
4804     }
4805 
4806     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4807     target_urb_adr = lurb->target_urb_adr;
4808     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4809     unlock_user(argptr, arg, target_size);
4810 
4811     g_free(lurb);
4812     return ret;
4813 }
4814 
4815 static abi_long
4816 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4817                              uint8_t *buf_temp __attribute__((unused)),
4818                              int fd, int cmd, abi_long arg)
4819 {
4820     struct live_urb *lurb;
4821 
4822     /* map target address back to host URB with metadata. */
4823     lurb = urb_hashtable_lookup(arg);
4824     if (!lurb) {
4825         return -TARGET_EFAULT;
4826     }
4827     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4828 }
4829 
4830 static abi_long
4831 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4832                             int fd, int cmd, abi_long arg)
4833 {
4834     const argtype *arg_type = ie->arg_type;
4835     int target_size;
4836     abi_long ret;
4837     void *argptr;
4838     int rw_dir;
4839     struct live_urb *lurb;
4840 
4841     /*
4842      * each submitted URB needs to map to a unique ID for the
4843      * kernel, and that unique ID needs to be a pointer to
4844      * host memory.  hence, we need to malloc for each URB.
4845      * isochronous transfers have a variable length struct.
4846      */
4847     arg_type++;
4848     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4849 
4850     /* construct host copy of urb and metadata */
4851     lurb = g_try_malloc0(sizeof(struct live_urb));
4852     if (!lurb) {
4853         return -TARGET_ENOMEM;
4854     }
4855 
4856     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4857     if (!argptr) {
4858         g_free(lurb);
4859         return -TARGET_EFAULT;
4860     }
4861     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4862     unlock_user(argptr, arg, 0);
4863 
4864     lurb->target_urb_adr = arg;
4865     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4866 
4867     /* buffer space used depends on endpoint type so lock the entire buffer */
4868     /* control type urbs should check the buffer contents for true direction */
4869     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4870     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4871         lurb->host_urb.buffer_length, 1);
4872     if (lurb->target_buf_ptr == NULL) {
4873         g_free(lurb);
4874         return -TARGET_EFAULT;
4875     }
4876 
4877     /* update buffer pointer in host copy */
4878     lurb->host_urb.buffer = lurb->target_buf_ptr;
4879 
4880     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4881     if (is_error(ret)) {
4882         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4883         g_free(lurb);
4884     } else {
4885         urb_hashtable_insert(lurb);
4886     }
4887 
4888     return ret;
4889 }
4890 #endif /* CONFIG_USBFS */
4891 
4892 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4893                             int cmd, abi_long arg)
4894 {
4895     void *argptr;
4896     struct dm_ioctl *host_dm;
4897     abi_long guest_data;
4898     uint32_t guest_data_size;
4899     int target_size;
4900     const argtype *arg_type = ie->arg_type;
4901     abi_long ret;
4902     void *big_buf = NULL;
4903     char *host_data;
4904 
4905     arg_type++;
4906     target_size = thunk_type_size(arg_type, 0);
4907     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4908     if (!argptr) {
4909         ret = -TARGET_EFAULT;
4910         goto out;
4911     }
4912     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4913     unlock_user(argptr, arg, 0);
4914 
4915     /* buf_temp is too small, so fetch things into a bigger buffer */
4916     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4917     memcpy(big_buf, buf_temp, target_size);
4918     buf_temp = big_buf;
4919     host_dm = big_buf;
4920 
4921     guest_data = arg + host_dm->data_start;
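         /*
          * data_start is guest-controlled; reject values that would wrap
          * the data pointer around the guest address space.
          */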
4922     if ((guest_data - arg) < 0) {
4923         ret = -TARGET_EINVAL;
4924         goto out;
4925     }
4926     guest_data_size = host_dm->data_size - host_dm->data_start;
4927     host_data = (char*)host_dm + host_dm->data_start;
4928 
4929     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4930     if (!argptr) {
4931         ret = -TARGET_EFAULT;
4932         goto out;
4933     }
4934 
4935     switch (ie->host_cmd) {
4936     case DM_REMOVE_ALL:
4937     case DM_LIST_DEVICES:
4938     case DM_DEV_CREATE:
4939     case DM_DEV_REMOVE:
4940     case DM_DEV_SUSPEND:
4941     case DM_DEV_STATUS:
4942     case DM_DEV_WAIT:
4943     case DM_TABLE_STATUS:
4944     case DM_TABLE_CLEAR:
4945     case DM_TABLE_DEPS:
4946     case DM_LIST_VERSIONS:
4947         /* no input data */
4948         break;
4949     case DM_DEV_RENAME:
4950     case DM_DEV_SET_GEOMETRY:
4951         /* data contains only strings */
4952         memcpy(host_data, argptr, guest_data_size);
4953         break;
4954     case DM_TARGET_MSG:
4955         memcpy(host_data, argptr, guest_data_size);
4956         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4957         break;
4958     case DM_TABLE_LOAD:
4959     {
4960         void *gspec = argptr;
4961         void *cur_data = host_data;
4962         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4963         int spec_size = thunk_type_size(arg_type, 0);
4964         int i;
4965 
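             /*
              * Each dm_target_spec is followed by its parameter string.
              * Convert the spec to host layout, copy the string after it
              * and rewrite 'next' to the repacked size so that the host
              * specs stay contiguous.
              */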
4966         for (i = 0; i < host_dm->target_count; i++) {
4967             struct dm_target_spec *spec = cur_data;
4968             uint32_t next;
4969             int slen;
4970 
4971             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4972             slen = strlen((char*)gspec + spec_size) + 1;
4973             next = spec->next;
4974             spec->next = sizeof(*spec) + slen;
4975             strcpy((char*)&spec[1], gspec + spec_size);
4976             gspec += next;
4977             cur_data += spec->next;
4978         }
4979         break;
4980     }
4981     default:
4982         ret = -TARGET_EINVAL;
4983         unlock_user(argptr, guest_data, 0);
4984         goto out;
4985     }
4986     unlock_user(argptr, guest_data, 0);
4987 
4988     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4989     if (!is_error(ret)) {
4990         guest_data = arg + host_dm->data_start;
4991         guest_data_size = host_dm->data_size - host_dm->data_start;
4992         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
             if (!argptr) {
                 ret = -TARGET_EFAULT;
                 goto out;
             }
4993         switch (ie->host_cmd) {
4994         case DM_REMOVE_ALL:
4995         case DM_DEV_CREATE:
4996         case DM_DEV_REMOVE:
4997         case DM_DEV_RENAME:
4998         case DM_DEV_SUSPEND:
4999         case DM_DEV_STATUS:
5000         case DM_TABLE_LOAD:
5001         case DM_TABLE_CLEAR:
5002         case DM_TARGET_MSG:
5003         case DM_DEV_SET_GEOMETRY:
5004             /* no return data */
5005             break;
5006         case DM_LIST_DEVICES:
5007         {
5008             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5009             uint32_t remaining_data = guest_data_size;
5010             void *cur_data = argptr;
5011             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5012             int nl_size = 12; /* can't use thunk_size due to alignment */
5013 
5014             while (1) {
5015                 uint32_t next = nl->next;
5016                 if (next) {
5017                     nl->next = nl_size + (strlen(nl->name) + 1);
5018                 }
5019                 if (remaining_data < nl->next) {
5020                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5021                     break;
5022                 }
5023                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5024                 strcpy(cur_data + nl_size, nl->name);
5025                 cur_data += nl->next;
5026                 remaining_data -= nl->next;
5027                 if (!next) {
5028                     break;
5029                 }
5030                 nl = (void*)nl + next;
5031             }
5032             break;
5033         }
5034         case DM_DEV_WAIT:
5035         case DM_TABLE_STATUS:
5036         {
5037             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5038             void *cur_data = argptr;
5039             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5040             int spec_size = thunk_type_size(arg_type, 0);
5041             int i;
5042 
5043             for (i = 0; i < host_dm->target_count; i++) {
5044                 uint32_t next = spec->next;
5045                 int slen = strlen((char*)&spec[1]) + 1;
5046                 spec->next = (cur_data - argptr) + spec_size + slen;
5047                 if (guest_data_size < spec->next) {
5048                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5049                     break;
5050                 }
5051                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5052                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5053                 cur_data = argptr + spec->next;
5054                 spec = (void*)host_dm + host_dm->data_start + next;
5055             }
5056             break;
5057         }
5058         case DM_TABLE_DEPS:
5059         {
5060             void *hdata = (void*)host_dm + host_dm->data_start;
5061             int count = *(uint32_t*)hdata;
5062             uint64_t *hdev = hdata + 8;
5063             uint64_t *gdev = argptr + 8;
5064             int i;
5065 
5066             *(uint32_t*)argptr = tswap32(count);
5067             for (i = 0; i < count; i++) {
5068                 *gdev = tswap64(*hdev);
5069                 gdev++;
5070                 hdev++;
5071             }
5072             break;
5073         }
5074         case DM_LIST_VERSIONS:
5075         {
5076             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5077             uint32_t remaining_data = guest_data_size;
5078             void *cur_data = argptr;
5079             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5080             int vers_size = thunk_type_size(arg_type, 0);
5081 
5082             while (1) {
5083                 uint32_t next = vers->next;
5084                 if (next) {
5085                     vers->next = vers_size + (strlen(vers->name) + 1);
5086                 }
5087                 if (remaining_data < vers->next) {
5088                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5089                     break;
5090                 }
5091                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5092                 strcpy(cur_data + vers_size, vers->name);
5093                 cur_data += vers->next;
5094                 remaining_data -= vers->next;
5095                 if (!next) {
5096                     break;
5097                 }
5098                 vers = (void*)vers + next;
5099             }
5100             break;
5101         }
5102         default:
5103             unlock_user(argptr, guest_data, 0);
5104             ret = -TARGET_EINVAL;
5105             goto out;
5106         }
5107         unlock_user(argptr, guest_data, guest_data_size);
5108 
5109         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5110         if (!argptr) {
5111             ret = -TARGET_EFAULT;
5112             goto out;
5113         }
5114         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5115         unlock_user(argptr, arg, target_size);
5116     }
5117 out:
5118     g_free(big_buf);
5119     return ret;
5120 }
5121 
5122 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5123                                int cmd, abi_long arg)
5124 {
5125     void *argptr;
5126     int target_size;
5127     const argtype *arg_type = ie->arg_type;
5128     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5129     abi_long ret;
5130 
5131     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5132     struct blkpg_partition host_part;
5133 
5134     /* Read and convert blkpg */
5135     arg_type++;
5136     target_size = thunk_type_size(arg_type, 0);
5137     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5138     if (!argptr) {
5139         ret = -TARGET_EFAULT;
5140         goto out;
5141     }
5142     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5143     unlock_user(argptr, arg, 0);
5144 
5145     switch (host_blkpg->op) {
5146     case BLKPG_ADD_PARTITION:
5147     case BLKPG_DEL_PARTITION:
5148         /* payload is struct blkpg_partition */
5149         break;
5150     default:
5151         /* Unknown opcode */
5152         ret = -TARGET_EINVAL;
5153         goto out;
5154     }
5155 
5156     /* Read and convert blkpg->data */
5157     arg = (abi_long)(uintptr_t)host_blkpg->data;
5158     target_size = thunk_type_size(part_arg_type, 0);
5159     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5160     if (!argptr) {
5161         ret = -TARGET_EFAULT;
5162         goto out;
5163     }
5164     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5165     unlock_user(argptr, arg, 0);
5166 
5167     /* Swizzle the data pointer to our local copy and call! */
5168     host_blkpg->data = &host_part;
5169     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5170 
5171 out:
5172     return ret;
5173 }
5174 
5175 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5176                                 int fd, int cmd, abi_long arg)
5177 {
5178     const argtype *arg_type = ie->arg_type;
5179     const StructEntry *se;
5180     const argtype *field_types;
5181     const int *dst_offsets, *src_offsets;
5182     int target_size;
5183     void *argptr;
5184     abi_ulong *target_rt_dev_ptr = NULL;
5185     unsigned long *host_rt_dev_ptr = NULL;
5186     abi_long ret;
5187     int i;
5188 
5189     assert(ie->access == IOC_W);
5190     assert(*arg_type == TYPE_PTR);
5191     arg_type++;
5192     assert(*arg_type == TYPE_STRUCT);
5193     target_size = thunk_type_size(arg_type, 0);
5194     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5195     if (!argptr) {
5196         return -TARGET_EFAULT;
5197     }
5198     arg_type++;
5199     assert(*arg_type == (int)STRUCT_rtentry);
5200     se = struct_entries + *arg_type++;
5201     assert(se->convert[0] == NULL);
5202     /* convert struct here to be able to catch rt_dev string */
5203     field_types = se->field_types;
5204     dst_offsets = se->field_offsets[THUNK_HOST];
5205     src_offsets = se->field_offsets[THUNK_TARGET];
5206     for (i = 0; i < se->nb_fields; i++) {
5207         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5208             assert(*field_types == TYPE_PTRVOID);
5209             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5210             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5211             if (*target_rt_dev_ptr != 0) {
5212                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5213                                                   tswapal(*target_rt_dev_ptr));
5214                 if (!*host_rt_dev_ptr) {
5215                     unlock_user(argptr, arg, 0);
5216                     return -TARGET_EFAULT;
5217                 }
5218             } else {
5219                 *host_rt_dev_ptr = 0;
5220             }
5221             field_types++;
5222             continue;
5223         }
5224         field_types = thunk_convert(buf_temp + dst_offsets[i],
5225                                     argptr + src_offsets[i],
5226                                     field_types, THUNK_HOST);
5227     }
5228     unlock_user(argptr, arg, 0);
5229 
5230     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5231 
5232     assert(host_rt_dev_ptr != NULL);
5233     assert(target_rt_dev_ptr != NULL);
5234     if (*host_rt_dev_ptr != 0) {
5235         unlock_user((void *)*host_rt_dev_ptr,
5236                     *target_rt_dev_ptr, 0);
5237     }
5238     return ret;
5239 }
5240 
5241 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5242                                      int fd, int cmd, abi_long arg)
5243 {
5244     int sig = target_to_host_signal(arg);
5245     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5246 }
5247 
5248 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5249                                     int fd, int cmd, abi_long arg)
5250 {
5251     struct timeval tv;
5252     abi_long ret;
5253 
5254     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5255     if (is_error(ret)) {
5256         return ret;
5257     }
5258 
5259     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5260         if (copy_to_user_timeval(arg, &tv)) {
5261             return -TARGET_EFAULT;
5262         }
5263     } else {
5264         if (copy_to_user_timeval64(arg, &tv)) {
5265             return -TARGET_EFAULT;
5266         }
5267     }
5268 
5269     return ret;
5270 }
5271 
5272 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5273                                       int fd, int cmd, abi_long arg)
5274 {
5275     struct timespec ts;
5276     abi_long ret;
5277 
5278     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5279     if (is_error(ret)) {
5280         return ret;
5281     }
5282 
5283     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5284         if (host_to_target_timespec(arg, &ts)) {
5285             return -TARGET_EFAULT;
5286         }
5287     } else {
5288         if (host_to_target_timespec64(arg, &ts)) {
5289             return -TARGET_EFAULT;
5290         }
5291     }
5292 
5293     return ret;
5294 }
5295 
5296 #ifdef TIOCGPTPEER
5297 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5298                                      int fd, int cmd, abi_long arg)
5299 {
5300     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5301     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5302 }
5303 #endif
5304 
5305 #ifdef HAVE_DRM_H
5306 
5307 static void unlock_drm_version(struct drm_version *host_ver,
5308                                struct target_drm_version *target_ver,
5309                                bool copy)
5310 {
5311     unlock_user(host_ver->name, target_ver->name,
5312                                 copy ? host_ver->name_len : 0);
5313     unlock_user(host_ver->date, target_ver->date,
5314                                 copy ? host_ver->date_len : 0);
5315     unlock_user(host_ver->desc, target_ver->desc,
5316                                 copy ? host_ver->desc_len : 0);
5317 }
5318 
5319 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5320                                           struct target_drm_version *target_ver)
5321 {
5322     memset(host_ver, 0, sizeof(*host_ver));
5323 
5324     __get_user(host_ver->name_len, &target_ver->name_len);
5325     if (host_ver->name_len) {
5326         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5327                                    target_ver->name_len, 0);
5328         if (!host_ver->name) {
5329             return -EFAULT;
5330         }
5331     }
5332 
5333     __get_user(host_ver->date_len, &target_ver->date_len);
5334     if (host_ver->date_len) {
5335         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5336                                    target_ver->date_len, 0);
5337         if (!host_ver->date) {
5338             goto err;
5339         }
5340     }
5341 
5342     __get_user(host_ver->desc_len, &target_ver->desc_len);
5343     if (host_ver->desc_len) {
5344         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5345                                    target_ver->desc_len, 0);
5346         if (!host_ver->desc) {
5347             goto err;
5348         }
5349     }
5350 
5351     return 0;
5352 err:
5353     unlock_drm_version(host_ver, target_ver, false);
5354     return -EFAULT;
5355 }
5356 
5357 static inline void host_to_target_drmversion(
5358                                           struct target_drm_version *target_ver,
5359                                           struct drm_version *host_ver)
5360 {
5361     __put_user(host_ver->version_major, &target_ver->version_major);
5362     __put_user(host_ver->version_minor, &target_ver->version_minor);
5363     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5364     __put_user(host_ver->name_len, &target_ver->name_len);
5365     __put_user(host_ver->date_len, &target_ver->date_len);
5366     __put_user(host_ver->desc_len, &target_ver->desc_len);
5367     unlock_drm_version(host_ver, target_ver, true);
5368 }
5369 
5370 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5371                              int fd, int cmd, abi_long arg)
5372 {
5373     struct drm_version *ver;
5374     struct target_drm_version *target_ver;
5375     abi_long ret;
5376 
5377     switch (ie->host_cmd) {
5378     case DRM_IOCTL_VERSION:
5379         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5380             return -TARGET_EFAULT;
5381         }
5382         ver = (struct drm_version *)buf_temp;
5383         ret = target_to_host_drmversion(ver, target_ver);
5384         if (!is_error(ret)) {
5385             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5386             if (is_error(ret)) {
5387                 unlock_drm_version(ver, target_ver, false);
5388             } else {
5389                 host_to_target_drmversion(target_ver, ver);
5390             }
5391         }
5392         unlock_user_struct(target_ver, arg, 0);
5393         return ret;
5394     }
5395     return -TARGET_ENOSYS;
5396 }
5397 
5398 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5399                                            struct drm_i915_getparam *gparam,
5400                                            int fd, abi_long arg)
5401 {
5402     abi_long ret;
5403     int value;
5404     struct target_drm_i915_getparam *target_gparam;
5405 
5406     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5407         return -TARGET_EFAULT;
5408     }
5409 
5410     __get_user(gparam->param, &target_gparam->param);
5411     gparam->value = &value;
5412     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5413     put_user_s32(value, target_gparam->value);
5414 
5415     unlock_user_struct(target_gparam, arg, 0);
5416     return ret;
5417 }
5418 
5419 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5420                                   int fd, int cmd, abi_long arg)
5421 {
5422     switch (ie->host_cmd) {
5423     case DRM_IOCTL_I915_GETPARAM:
5424         return do_ioctl_drm_i915_getparam(ie,
5425                                           (struct drm_i915_getparam *)buf_temp,
5426                                           fd, arg);
5427     default:
5428         return -TARGET_ENOSYS;
5429     }
5430 }
5431 
5432 #endif
5433 
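     /*
      * Table of supported ioctls, filled in from ioctls.h via the IOCTL*
      * macros; do_ioctl() scans it linearly and stops at the zero
      * terminator entry.
      */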
5434 IOCTLEntry ioctl_entries[] = {
5435 #define IOCTL(cmd, access, ...) \
5436     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5437 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5438     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5439 #define IOCTL_IGNORE(cmd) \
5440     { TARGET_ ## cmd, 0, #cmd },
5441 #include "ioctls.h"
5442     { 0, 0, },
5443 };
5444 
5445 /* ??? Implement proper locking for ioctls.  */
5446 /* do_ioctl() must return target values and target errnos. */
5447 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5448 {
5449     const IOCTLEntry *ie;
5450     const argtype *arg_type;
5451     abi_long ret;
5452     uint8_t buf_temp[MAX_STRUCT_SIZE];
5453     int target_size;
5454     void *argptr;
5455 
5456     ie = ioctl_entries;
5457     for(;;) {
5458         if (ie->target_cmd == 0) {
5459             qemu_log_mask(
5460                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5461             return -TARGET_ENOSYS;
5462         }
5463         if (ie->target_cmd == cmd)
5464             break;
5465         ie++;
5466     }
5467     arg_type = ie->arg_type;
5468     if (ie->do_ioctl) {
5469         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5470     } else if (!ie->host_cmd) {
5471         /* Some architectures define BSD ioctls in their headers
5472            that are not implemented in Linux.  */
5473         return -TARGET_ENOSYS;
5474     }
5475 
5476     switch(arg_type[0]) {
5477     case TYPE_NULL:
5478         /* no argument */
5479         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5480         break;
5481     case TYPE_PTRVOID:
5482     case TYPE_INT:
5483     case TYPE_LONG:
5484     case TYPE_ULONG:
5485         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5486         break;
5487     case TYPE_PTR:
5488         arg_type++;
5489         target_size = thunk_type_size(arg_type, 0);
5490         switch(ie->access) {
5491         case IOC_R:
5492             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5493             if (!is_error(ret)) {
5494                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5495                 if (!argptr)
5496                     return -TARGET_EFAULT;
5497                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5498                 unlock_user(argptr, arg, target_size);
5499             }
5500             break;
5501         case IOC_W:
5502             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5503             if (!argptr)
5504                 return -TARGET_EFAULT;
5505             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5506             unlock_user(argptr, arg, 0);
5507             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5508             break;
5509         default:
5510         case IOC_RW:
5511             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5512             if (!argptr)
5513                 return -TARGET_EFAULT;
5514             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5515             unlock_user(argptr, arg, 0);
5516             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5517             if (!is_error(ret)) {
5518                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5519                 if (!argptr)
5520                     return -TARGET_EFAULT;
5521                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5522                 unlock_user(argptr, arg, target_size);
5523             }
5524             break;
5525         }
5526         break;
5527     default:
5528         qemu_log_mask(LOG_UNIMP,
5529                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5530                       (long)cmd, arg_type[0]);
5531         ret = -TARGET_ENOSYS;
5532         break;
5533     }
5534     return ret;
5535 }
5536 
5537 static const bitmask_transtbl iflag_tbl[] = {
5538         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5539         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5540         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5541         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5542         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5543         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5544         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5545         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5546         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5547         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5548         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5549         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5550         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5551         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5552         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5553         { 0, 0, 0, 0 }
5554 };
5555 
5556 static const bitmask_transtbl oflag_tbl[] = {
5557 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5558 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5559 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5560 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5561 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5562 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5563 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5564 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5565 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5566 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5567 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5568 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5569 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5570 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5571 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5572 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5573 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5574 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5575 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5576 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5577 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5578 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5579 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5580 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5581 	{ 0, 0, 0, 0 }
5582 };
5583 
5584 static const bitmask_transtbl cflag_tbl[] = {
5585 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5586 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5587 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5588 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5589 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5590 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5591 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5592 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5593 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5594 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5595 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5596 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5597 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5598 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5599 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5600 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5601 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5602 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5603 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5604 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5605 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5606 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5607 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5608 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5609 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5610 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5611 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5612 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5613 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5614 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5615 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5616 	{ 0, 0, 0, 0 }
5617 };
5618 
5619 static const bitmask_transtbl lflag_tbl[] = {
5620   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5621   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5622   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5623   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5624   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5625   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5626   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5627   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5628   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5629   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5630   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5631   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5632   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5633   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5634   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5635   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5636   { 0, 0, 0, 0 }
5637 };
5638 
5639 static void target_to_host_termios (void *dst, const void *src)
5640 {
5641     struct host_termios *host = dst;
5642     const struct target_termios *target = src;
5643 
5644     host->c_iflag =
5645         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5646     host->c_oflag =
5647         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5648     host->c_cflag =
5649         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5650     host->c_lflag =
5651         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5652     host->c_line = target->c_line;
5653 
5654     memset(host->c_cc, 0, sizeof(host->c_cc));
5655     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5656     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5657     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5658     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5659     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5660     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5661     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5662     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5663     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5664     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5665     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5666     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5667     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5668     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5669     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5670     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5671     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5672 }
5673 
5674 static void host_to_target_termios (void *dst, const void *src)
5675 {
5676     struct target_termios *target = dst;
5677     const struct host_termios *host = src;
5678 
5679     target->c_iflag =
5680         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5681     target->c_oflag =
5682         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5683     target->c_cflag =
5684         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5685     target->c_lflag =
5686         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5687     target->c_line = host->c_line;
5688 
5689     memset(target->c_cc, 0, sizeof(target->c_cc));
5690     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5691     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5692     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5693     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5694     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5695     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5696     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5697     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5698     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5699     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5700     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5701     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5702     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5703     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5704     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5705     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5706     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5707 }
5708 
5709 static const StructEntry struct_termios_def = {
5710     .convert = { host_to_target_termios, target_to_host_termios },
5711     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5712     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5713 };
5714 
5715 static bitmask_transtbl mmap_flags_tbl[] = {
5716     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5717     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5718     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5719     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5720       MAP_ANONYMOUS, MAP_ANONYMOUS },
5721     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5722       MAP_GROWSDOWN, MAP_GROWSDOWN },
5723     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5724       MAP_DENYWRITE, MAP_DENYWRITE },
5725     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5726       MAP_EXECUTABLE, MAP_EXECUTABLE },
5727     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5728     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5729       MAP_NORESERVE, MAP_NORESERVE },
5730     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5731     /* MAP_STACK has been ignored by the kernel for quite some time.
5732        Recognize it for the target insofar as we do not want to pass
5733        it through to the host.  */
5734     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5735     { 0, 0, 0, 0 }
5736 };
5737 
5738 /*
5739  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5740  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5741  */
5742 #if defined(TARGET_I386)
5743 
5744 /* NOTE: there is really one LDT for all the threads */
5745 static uint8_t *ldt_table;
5746 
5747 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5748 {
5749     int size;
5750     void *p;
5751 
5752     if (!ldt_table)
5753         return 0;
5754     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5755     if (size > bytecount)
5756         size = bytecount;
5757     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5758     if (!p)
5759         return -TARGET_EFAULT;
5760     /* ??? Should this be byteswapped?  */
5761     memcpy(p, ldt_table, size);
5762     unlock_user(p, ptr, size);
5763     return size;
5764 }
5765 
5766 /* XXX: add locking support */
5767 static abi_long write_ldt(CPUX86State *env,
5768                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5769 {
5770     struct target_modify_ldt_ldt_s ldt_info;
5771     struct target_modify_ldt_ldt_s *target_ldt_info;
5772     int seg_32bit, contents, read_exec_only, limit_in_pages;
5773     int seg_not_present, useable, lm;
5774     uint32_t *lp, entry_1, entry_2;
5775 
5776     if (bytecount != sizeof(ldt_info))
5777         return -TARGET_EINVAL;
5778     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5779         return -TARGET_EFAULT;
5780     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5781     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5782     ldt_info.limit = tswap32(target_ldt_info->limit);
5783     ldt_info.flags = tswap32(target_ldt_info->flags);
5784     unlock_user_struct(target_ldt_info, ptr, 0);
5785 
5786     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5787         return -TARGET_EINVAL;
5788     seg_32bit = ldt_info.flags & 1;
5789     contents = (ldt_info.flags >> 1) & 3;
5790     read_exec_only = (ldt_info.flags >> 3) & 1;
5791     limit_in_pages = (ldt_info.flags >> 4) & 1;
5792     seg_not_present = (ldt_info.flags >> 5) & 1;
5793     useable = (ldt_info.flags >> 6) & 1;
5794 #ifdef TARGET_ABI32
5795     lm = 0;
5796 #else
5797     lm = (ldt_info.flags >> 7) & 1;
5798 #endif
5799     if (contents == 3) {
5800         if (oldmode)
5801             return -TARGET_EINVAL;
5802         if (seg_not_present == 0)
5803             return -TARGET_EINVAL;
5804     }
5805     /* allocate the LDT */
5806     if (!ldt_table) {
5807         env->ldt.base = target_mmap(0,
5808                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5809                                     PROT_READ|PROT_WRITE,
5810                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5811         if (env->ldt.base == -1)
5812             return -TARGET_ENOMEM;
5813         memset(g2h(env->ldt.base), 0,
5814                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5815         env->ldt.limit = 0xffff;
5816         ldt_table = g2h(env->ldt.base);
5817     }
5818 
5819     /* NOTE: same code as Linux kernel */
5820     /* Allow LDTs to be cleared by the user. */
5821     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5822         if (oldmode ||
5823             (contents == 0		&&
5824              read_exec_only == 1	&&
5825              seg_32bit == 0		&&
5826              limit_in_pages == 0	&&
5827              seg_not_present == 1	&&
5828              useable == 0 )) {
5829             entry_1 = 0;
5830             entry_2 = 0;
5831             goto install;
5832         }
5833     }
5834 
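         /*
          * Pack the fields into the two 32-bit words of an x86 segment
          * descriptor: entry_1 holds base[15:0] and limit[15:0], entry_2
          * holds the remaining base/limit bits plus the access and flag
          * bits (0x7000 sets the S bit and DPL=3).
          */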
5835     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5836         (ldt_info.limit & 0x0ffff);
5837     entry_2 = (ldt_info.base_addr & 0xff000000) |
5838         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5839         (ldt_info.limit & 0xf0000) |
5840         ((read_exec_only ^ 1) << 9) |
5841         (contents << 10) |
5842         ((seg_not_present ^ 1) << 15) |
5843         (seg_32bit << 22) |
5844         (limit_in_pages << 23) |
5845         (lm << 21) |
5846         0x7000;
5847     if (!oldmode)
5848         entry_2 |= (useable << 20);
5849 
5850     /* Install the new entry ...  */
5851 install:
5852     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5853     lp[0] = tswap32(entry_1);
5854     lp[1] = tswap32(entry_2);
5855     return 0;
5856 }
5857 
5858 /* specific and weird i386 syscalls */
5859 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5860                               unsigned long bytecount)
5861 {
5862     abi_long ret;
5863 
5864     switch (func) {
5865     case 0:
5866         ret = read_ldt(ptr, bytecount);
5867         break;
5868     case 1:
5869         ret = write_ldt(env, ptr, bytecount, 1);
5870         break;
5871     case 0x11:
5872         ret = write_ldt(env, ptr, bytecount, 0);
5873         break;
5874     default:
5875         ret = -TARGET_ENOSYS;
5876         break;
5877     }
5878     return ret;
5879 }
5880 
5881 #if defined(TARGET_ABI32)
5882 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5883 {
5884     uint64_t *gdt_table = g2h(env->gdt.base);
5885     struct target_modify_ldt_ldt_s ldt_info;
5886     struct target_modify_ldt_ldt_s *target_ldt_info;
5887     int seg_32bit, contents, read_exec_only, limit_in_pages;
5888     int seg_not_present, useable, lm;
5889     uint32_t *lp, entry_1, entry_2;
5890     int i;
5891 
5892     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5893     if (!target_ldt_info)
5894         return -TARGET_EFAULT;
5895     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5896     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5897     ldt_info.limit = tswap32(target_ldt_info->limit);
5898     ldt_info.flags = tswap32(target_ldt_info->flags);
5899     if (ldt_info.entry_number == -1) {
5900         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5901             if (gdt_table[i] == 0) {
5902                 ldt_info.entry_number = i;
5903                 target_ldt_info->entry_number = tswap32(i);
5904                 break;
5905             }
5906         }
5907     }
5908     unlock_user_struct(target_ldt_info, ptr, 1);
5909 
5910     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5911         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5912         return -TARGET_EINVAL;
5913     seg_32bit = ldt_info.flags & 1;
5914     contents = (ldt_info.flags >> 1) & 3;
5915     read_exec_only = (ldt_info.flags >> 3) & 1;
5916     limit_in_pages = (ldt_info.flags >> 4) & 1;
5917     seg_not_present = (ldt_info.flags >> 5) & 1;
5918     useable = (ldt_info.flags >> 6) & 1;
5919 #ifdef TARGET_ABI32
5920     lm = 0;
5921 #else
5922     lm = (ldt_info.flags >> 7) & 1;
5923 #endif
5924 
5925     if (contents == 3) {
5926         if (seg_not_present == 0)
5927             return -TARGET_EINVAL;
5928     }
5929 
5930     /* NOTE: same code as Linux kernel */
5931     /* Allow LDTs to be cleared by the user. */
5932     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5933         if ((contents == 0             &&
5934              read_exec_only == 1       &&
5935              seg_32bit == 0            &&
5936              limit_in_pages == 0       &&
5937              seg_not_present == 1      &&
5938              useable == 0 )) {
5939             entry_1 = 0;
5940             entry_2 = 0;
5941             goto install;
5942         }
5943     }
5944 
5945     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5946         (ldt_info.limit & 0x0ffff);
5947     entry_2 = (ldt_info.base_addr & 0xff000000) |
5948         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5949         (ldt_info.limit & 0xf0000) |
5950         ((read_exec_only ^ 1) << 9) |
5951         (contents << 10) |
5952         ((seg_not_present ^ 1) << 15) |
5953         (seg_32bit << 22) |
5954         (limit_in_pages << 23) |
5955         (useable << 20) |
5956         (lm << 21) |
5957         0x7000;
5958 
5959     /* Install the new entry ...  */
5960 install:
5961     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5962     lp[0] = tswap32(entry_1);
5963     lp[1] = tswap32(entry_2);
5964     return 0;
5965 }
5966 
5967 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5968 {
5969     struct target_modify_ldt_ldt_s *target_ldt_info;
5970     uint64_t *gdt_table = g2h(env->gdt.base);
5971     uint32_t base_addr, limit, flags;
5972     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5973     int seg_not_present, useable, lm;
5974     uint32_t *lp, entry_1, entry_2;
5975 
5976     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5977     if (!target_ldt_info)
5978         return -TARGET_EFAULT;
5979     idx = tswap32(target_ldt_info->entry_number);
5980     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5981         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5982         unlock_user_struct(target_ldt_info, ptr, 1);
5983         return -TARGET_EINVAL;
5984     }
5985     lp = (uint32_t *)(gdt_table + idx);
5986     entry_1 = tswap32(lp[0]);
5987     entry_2 = tswap32(lp[1]);
5988 
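         /* Decode the descriptor words back into modify_ldt-style flag bits. */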
5989     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5990     contents = (entry_2 >> 10) & 3;
5991     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5992     seg_32bit = (entry_2 >> 22) & 1;
5993     limit_in_pages = (entry_2 >> 23) & 1;
5994     useable = (entry_2 >> 20) & 1;
5995 #ifdef TARGET_ABI32
5996     lm = 0;
5997 #else
5998     lm = (entry_2 >> 21) & 1;
5999 #endif
6000     flags = (seg_32bit << 0) | (contents << 1) |
6001         (read_exec_only << 3) | (limit_in_pages << 4) |
6002         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6003     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6004     base_addr = (entry_1 >> 16) |
6005         (entry_2 & 0xff000000) |
6006         ((entry_2 & 0xff) << 16);
6007     target_ldt_info->base_addr = tswapal(base_addr);
6008     target_ldt_info->limit = tswap32(limit);
6009     target_ldt_info->flags = tswap32(flags);
6010     unlock_user_struct(target_ldt_info, ptr, 1);
6011     return 0;
6012 }
6013 
6014 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6015 {
6016     return -TARGET_ENOSYS;
6017 }
6018 #else
6019 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6020 {
6021     abi_long ret = 0;
6022     abi_ulong val;
6023     int idx;
6024 
6025     switch(code) {
6026     case TARGET_ARCH_SET_GS:
6027     case TARGET_ARCH_SET_FS:
6028         if (code == TARGET_ARCH_SET_GS)
6029             idx = R_GS;
6030         else
6031             idx = R_FS;
6032         cpu_x86_load_seg(env, idx, 0);
6033         env->segs[idx].base = addr;
6034         break;
6035     case TARGET_ARCH_GET_GS:
6036     case TARGET_ARCH_GET_FS:
6037         if (code == TARGET_ARCH_GET_GS)
6038             idx = R_GS;
6039         else
6040             idx = R_FS;
6041         val = env->segs[idx].base;
6042         if (put_user(val, addr, abi_ulong))
6043             ret = -TARGET_EFAULT;
6044         break;
6045     default:
6046         ret = -TARGET_EINVAL;
6047         break;
6048     }
6049     return ret;
6050 }
6051 #endif /* defined(TARGET_ABI32) */
6052 
6053 #endif /* defined(TARGET_I386) */
6054 
6055 #define NEW_STACK_SIZE 0x40000
6056 
6057 
6058 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6059 typedef struct {
6060     CPUArchState *env;
6061     pthread_mutex_t mutex;
6062     pthread_cond_t cond;
6063     pthread_t thread;
6064     uint32_t tid;
6065     abi_ulong child_tidptr;
6066     abi_ulong parent_tidptr;
6067     sigset_t sigmask;
6068 } new_thread_info;
6069 
6070 static void *clone_func(void *arg)
6071 {
6072     new_thread_info *info = arg;
6073     CPUArchState *env;
6074     CPUState *cpu;
6075     TaskState *ts;
6076 
6077     rcu_register_thread();
6078     tcg_register_thread();
6079     env = info->env;
6080     cpu = env_cpu(env);
6081     thread_cpu = cpu;
6082     ts = (TaskState *)cpu->opaque;
6083     info->tid = sys_gettid();
6084     task_settid(ts);
6085     if (info->child_tidptr)
6086         put_user_u32(info->tid, info->child_tidptr);
6087     if (info->parent_tidptr)
6088         put_user_u32(info->tid, info->parent_tidptr);
6089     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6090     /* Enable signals.  */
6091     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6092     /* Signal to the parent that we're ready.  */
6093     pthread_mutex_lock(&info->mutex);
6094     pthread_cond_broadcast(&info->cond);
6095     pthread_mutex_unlock(&info->mutex);
6096     /* Wait until the parent has finished initializing the tls state.  */
6097     pthread_mutex_lock(&clone_lock);
6098     pthread_mutex_unlock(&clone_lock);
6099     cpu_loop(env);
6100     /* never exits */
6101     return NULL;
6102 }
6103 
6104 /* do_fork() must return host values and target errnos (unlike most
6105    do_*() functions). */
6106 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6107                    abi_ulong parent_tidptr, target_ulong newtls,
6108                    abi_ulong child_tidptr)
6109 {
6110     CPUState *cpu = env_cpu(env);
6111     int ret;
6112     TaskState *ts;
6113     CPUState *new_cpu;
6114     CPUArchState *new_env;
6115     sigset_t sigmask;
6116 
6117     flags &= ~CLONE_IGNORED_FLAGS;
6118 
6119     /* Emulate vfork() with fork() */
6120     if (flags & CLONE_VFORK)
6121         flags &= ~(CLONE_VFORK | CLONE_VM);
6122 
6123     if (flags & CLONE_VM) {
6124         TaskState *parent_ts = (TaskState *)cpu->opaque;
6125         new_thread_info info;
6126         pthread_attr_t attr;
6127 
6128         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6129             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6130             return -TARGET_EINVAL;
6131         }
6132 
6133         ts = g_new0(TaskState, 1);
6134         init_task_state(ts);
6135 
6136         /* Grab a mutex so that thread setup appears atomic.  */
6137         pthread_mutex_lock(&clone_lock);
6138 
6139         /* we create a new CPU instance. */
6140         new_env = cpu_copy(env);
6141         /* Init regs that differ from the parent.  */
6142         cpu_clone_regs_child(new_env, newsp, flags);
6143         cpu_clone_regs_parent(env, flags);
6144         new_cpu = env_cpu(new_env);
6145         new_cpu->opaque = ts;
6146         ts->bprm = parent_ts->bprm;
6147         ts->info = parent_ts->info;
6148         ts->signal_mask = parent_ts->signal_mask;
6149 
6150         if (flags & CLONE_CHILD_CLEARTID) {
6151             ts->child_tidptr = child_tidptr;
6152         }
6153 
6154         if (flags & CLONE_SETTLS) {
6155             cpu_set_tls (new_env, newtls);
6156         }
6157 
6158         memset(&info, 0, sizeof(info));
6159         pthread_mutex_init(&info.mutex, NULL);
6160         pthread_mutex_lock(&info.mutex);
6161         pthread_cond_init(&info.cond, NULL);
6162         info.env = new_env;
6163         if (flags & CLONE_CHILD_SETTID) {
6164             info.child_tidptr = child_tidptr;
6165         }
6166         if (flags & CLONE_PARENT_SETTID) {
6167             info.parent_tidptr = parent_tidptr;
6168         }
6169 
6170         ret = pthread_attr_init(&attr);
6171         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6172         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6173         /* It is not safe to deliver signals until the child has finished
6174            initializing, so temporarily block all signals.  */
6175         sigfillset(&sigmask);
6176         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6177         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6178 
6179         /* If this is our first additional thread, we need to ensure we
6180          * generate code for parallel execution and flush old translations.
6181          */
6182         if (!parallel_cpus) {
6183             parallel_cpus = true;
6184             tb_flush(cpu);
6185         }
6186 
6187         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6188         /* TODO: Free new CPU state if thread creation failed.  */
6189 
6190         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6191         pthread_attr_destroy(&attr);
6192         if (ret == 0) {
6193             /* Wait for the child to initialize.  */
6194             pthread_cond_wait(&info.cond, &info.mutex);
6195             ret = info.tid;
6196         } else {
6197             ret = -1;
6198         }
6199         pthread_mutex_unlock(&info.mutex);
6200         pthread_cond_destroy(&info.cond);
6201         pthread_mutex_destroy(&info.mutex);
6202         pthread_mutex_unlock(&clone_lock);
6203     } else {
6204         /* if there is no CLONE_VM, we consider it a fork */
6205         if (flags & CLONE_INVALID_FORK_FLAGS) {
6206             return -TARGET_EINVAL;
6207         }
6208 
6209         /* We can't support custom termination signals */
6210         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6211             return -TARGET_EINVAL;
6212         }
6213 
6214         if (block_signals()) {
6215             return -TARGET_ERESTARTSYS;
6216         }
6217 
6218         fork_start();
6219         ret = fork();
6220         if (ret == 0) {
6221             /* Child Process.  */
6222             cpu_clone_regs_child(env, newsp, flags);
6223             fork_end(1);
6224             /* There is a race condition here.  The parent process could
6225                theoretically read the TID in the child process before the child
6226                tid is set.  This would require using either ptrace
6227                (not implemented) or having *_tidptr to point at a shared memory
6228                (not implemented) or having *_tidptr point at a shared memory
6229                the child process gets its own copy of the lock.  */
6230             if (flags & CLONE_CHILD_SETTID)
6231                 put_user_u32(sys_gettid(), child_tidptr);
6232             if (flags & CLONE_PARENT_SETTID)
6233                 put_user_u32(sys_gettid(), parent_tidptr);
6234             ts = (TaskState *)cpu->opaque;
6235             if (flags & CLONE_SETTLS)
6236                 cpu_set_tls (env, newtls);
6237             if (flags & CLONE_CHILD_CLEARTID)
6238                 ts->child_tidptr = child_tidptr;
6239         } else {
6240             cpu_clone_regs_parent(env, flags);
6241             fork_end(0);
6242         }
6243     }
6244     return ret;
6245 }
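/*
 * Usage sketch (hypothetical guest calls, added for illustration): a guest
 * pthread_create() ends up as clone(CLONE_VM | CLONE_THREAD | ...) and takes
 * the pthread_create() branch above, returning the new host thread's TID,
 * while a guest fork() or vfork() carries no CLONE_VM (vfork is downgraded
 * to fork above) and takes the fork() branch, returning the child PID in the
 * parent and 0 in the child.
 */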
6246 
6247 /* warning: doesn't handle Linux-specific flags... */
6248 static int target_to_host_fcntl_cmd(int cmd)
6249 {
6250     int ret;
6251 
6252     switch(cmd) {
6253     case TARGET_F_DUPFD:
6254     case TARGET_F_GETFD:
6255     case TARGET_F_SETFD:
6256     case TARGET_F_GETFL:
6257     case TARGET_F_SETFL:
6258     case TARGET_F_OFD_GETLK:
6259     case TARGET_F_OFD_SETLK:
6260     case TARGET_F_OFD_SETLKW:
6261         ret = cmd;
6262         break;
6263     case TARGET_F_GETLK:
6264         ret = F_GETLK64;
6265         break;
6266     case TARGET_F_SETLK:
6267         ret = F_SETLK64;
6268         break;
6269     case TARGET_F_SETLKW:
6270         ret = F_SETLKW64;
6271         break;
6272     case TARGET_F_GETOWN:
6273         ret = F_GETOWN;
6274         break;
6275     case TARGET_F_SETOWN:
6276         ret = F_SETOWN;
6277         break;
6278     case TARGET_F_GETSIG:
6279         ret = F_GETSIG;
6280         break;
6281     case TARGET_F_SETSIG:
6282         ret = F_SETSIG;
6283         break;
6284 #if TARGET_ABI_BITS == 32
6285     case TARGET_F_GETLK64:
6286         ret = F_GETLK64;
6287         break;
6288     case TARGET_F_SETLK64:
6289         ret = F_SETLK64;
6290         break;
6291     case TARGET_F_SETLKW64:
6292         ret = F_SETLKW64;
6293         break;
6294 #endif
6295     case TARGET_F_SETLEASE:
6296         ret = F_SETLEASE;
6297         break;
6298     case TARGET_F_GETLEASE:
6299         ret = F_GETLEASE;
6300         break;
6301 #ifdef F_DUPFD_CLOEXEC
6302     case TARGET_F_DUPFD_CLOEXEC:
6303         ret = F_DUPFD_CLOEXEC;
6304         break;
6305 #endif
6306     case TARGET_F_NOTIFY:
6307         ret = F_NOTIFY;
6308         break;
6309 #ifdef F_GETOWN_EX
6310     case TARGET_F_GETOWN_EX:
6311         ret = F_GETOWN_EX;
6312         break;
6313 #endif
6314 #ifdef F_SETOWN_EX
6315     case TARGET_F_SETOWN_EX:
6316         ret = F_SETOWN_EX;
6317         break;
6318 #endif
6319 #ifdef F_SETPIPE_SZ
6320     case TARGET_F_SETPIPE_SZ:
6321         ret = F_SETPIPE_SZ;
6322         break;
6323     case TARGET_F_GETPIPE_SZ:
6324         ret = F_GETPIPE_SZ;
6325         break;
6326 #endif
6327     default:
6328         ret = -TARGET_EINVAL;
6329         break;
6330     }
6331 
6332 #if defined(__powerpc64__)
6333     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6334      * are not supported by the kernel. The glibc fcntl call actually adjusts
6335      * them to 5, 6 and 7 before making the syscall(). Since we make the
6336      * syscall directly, adjust to what the kernel supports.
6337      */
6338     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6339         ret -= F_GETLK64 - 5;
6340     }
6341 #endif
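    /*
     * Worked example of the adjustment above (values taken from the comment,
     * shown here for illustration): with glibc's F_GETLK64 == 12, a target
     * F_GETLK maps to ret == 12, and 12 - (12 - 5) == 5 is the command number
     * the kernel actually implements; 13 and 14 likewise become 6 and 7.
     */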
6342 
6343     return ret;
6344 }
6345 
6346 #define FLOCK_TRANSTBL \
6347     switch (type) { \
6348     TRANSTBL_CONVERT(F_RDLCK); \
6349     TRANSTBL_CONVERT(F_WRLCK); \
6350     TRANSTBL_CONVERT(F_UNLCK); \
6351     TRANSTBL_CONVERT(F_EXLCK); \
6352     TRANSTBL_CONVERT(F_SHLCK); \
6353     }
6354 
6355 static int target_to_host_flock(int type)
6356 {
6357 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6358     FLOCK_TRANSTBL
6359 #undef  TRANSTBL_CONVERT
6360     return -TARGET_EINVAL;
6361 }
6362 
6363 static int host_to_target_flock(int type)
6364 {
6365 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6366     FLOCK_TRANSTBL
6367 #undef  TRANSTBL_CONVERT
6368     /* if we don't know how to convert the value coming
6369      * from the host, we copy it to the target field as-is
6370      */
6371     return type;
6372 }
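/*
 * Illustration of the X-macro above (expansion shown for clarity): inside
 * target_to_host_flock(), TRANSTBL_CONVERT(F_RDLCK) expands to
 *     case TARGET_F_RDLCK: return F_RDLCK;
 * while inside host_to_target_flock() the same entry expands to
 *     case F_RDLCK: return TARGET_F_RDLCK;
 * so a single table of lock types serves both conversion directions.
 */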
6373 
6374 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6375                                             abi_ulong target_flock_addr)
6376 {
6377     struct target_flock *target_fl;
6378     int l_type;
6379 
6380     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6381         return -TARGET_EFAULT;
6382     }
6383 
6384     __get_user(l_type, &target_fl->l_type);
6385     l_type = target_to_host_flock(l_type);
6386     if (l_type < 0) {
6387         return l_type;
6388     }
6389     fl->l_type = l_type;
6390     __get_user(fl->l_whence, &target_fl->l_whence);
6391     __get_user(fl->l_start, &target_fl->l_start);
6392     __get_user(fl->l_len, &target_fl->l_len);
6393     __get_user(fl->l_pid, &target_fl->l_pid);
6394     unlock_user_struct(target_fl, target_flock_addr, 0);
6395     return 0;
6396 }
6397 
6398 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6399                                           const struct flock64 *fl)
6400 {
6401     struct target_flock *target_fl;
6402     short l_type;
6403 
6404     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6405         return -TARGET_EFAULT;
6406     }
6407 
6408     l_type = host_to_target_flock(fl->l_type);
6409     __put_user(l_type, &target_fl->l_type);
6410     __put_user(fl->l_whence, &target_fl->l_whence);
6411     __put_user(fl->l_start, &target_fl->l_start);
6412     __put_user(fl->l_len, &target_fl->l_len);
6413     __put_user(fl->l_pid, &target_fl->l_pid);
6414     unlock_user_struct(target_fl, target_flock_addr, 1);
6415     return 0;
6416 }
6417 
6418 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6419 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6420 
6421 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6422 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6423                                                    abi_ulong target_flock_addr)
6424 {
6425     struct target_oabi_flock64 *target_fl;
6426     int l_type;
6427 
6428     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6429         return -TARGET_EFAULT;
6430     }
6431 
6432     __get_user(l_type, &target_fl->l_type);
6433     l_type = target_to_host_flock(l_type);
6434     if (l_type < 0) {
6435         return l_type;
6436     }
6437     fl->l_type = l_type;
6438     __get_user(fl->l_whence, &target_fl->l_whence);
6439     __get_user(fl->l_start, &target_fl->l_start);
6440     __get_user(fl->l_len, &target_fl->l_len);
6441     __get_user(fl->l_pid, &target_fl->l_pid);
6442     unlock_user_struct(target_fl, target_flock_addr, 0);
6443     return 0;
6444 }
6445 
6446 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6447                                                  const struct flock64 *fl)
6448 {
6449     struct target_oabi_flock64 *target_fl;
6450     short l_type;
6451 
6452     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6453         return -TARGET_EFAULT;
6454     }
6455 
6456     l_type = host_to_target_flock(fl->l_type);
6457     __put_user(l_type, &target_fl->l_type);
6458     __put_user(fl->l_whence, &target_fl->l_whence);
6459     __put_user(fl->l_start, &target_fl->l_start);
6460     __put_user(fl->l_len, &target_fl->l_len);
6461     __put_user(fl->l_pid, &target_fl->l_pid);
6462     unlock_user_struct(target_fl, target_flock_addr, 1);
6463     return 0;
6464 }
6465 #endif
6466 
6467 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6468                                               abi_ulong target_flock_addr)
6469 {
6470     struct target_flock64 *target_fl;
6471     int l_type;
6472 
6473     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6474         return -TARGET_EFAULT;
6475     }
6476 
6477     __get_user(l_type, &target_fl->l_type);
6478     l_type = target_to_host_flock(l_type);
6479     if (l_type < 0) {
6480         return l_type;
6481     }
6482     fl->l_type = l_type;
6483     __get_user(fl->l_whence, &target_fl->l_whence);
6484     __get_user(fl->l_start, &target_fl->l_start);
6485     __get_user(fl->l_len, &target_fl->l_len);
6486     __get_user(fl->l_pid, &target_fl->l_pid);
6487     unlock_user_struct(target_fl, target_flock_addr, 0);
6488     return 0;
6489 }
6490 
6491 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6492                                             const struct flock64 *fl)
6493 {
6494     struct target_flock64 *target_fl;
6495     short l_type;
6496 
6497     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6498         return -TARGET_EFAULT;
6499     }
6500 
6501     l_type = host_to_target_flock(fl->l_type);
6502     __put_user(l_type, &target_fl->l_type);
6503     __put_user(fl->l_whence, &target_fl->l_whence);
6504     __put_user(fl->l_start, &target_fl->l_start);
6505     __put_user(fl->l_len, &target_fl->l_len);
6506     __put_user(fl->l_pid, &target_fl->l_pid);
6507     unlock_user_struct(target_fl, target_flock_addr, 1);
6508     return 0;
6509 }
6510 
6511 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6512 {
6513     struct flock64 fl64;
6514 #ifdef F_GETOWN_EX
6515     struct f_owner_ex fox;
6516     struct target_f_owner_ex *target_fox;
6517 #endif
6518     abi_long ret;
6519     int host_cmd = target_to_host_fcntl_cmd(cmd);
6520 
6521     if (host_cmd == -TARGET_EINVAL)
6522         return host_cmd;
6523 
6524     switch(cmd) {
6525     case TARGET_F_GETLK:
6526         ret = copy_from_user_flock(&fl64, arg);
6527         if (ret) {
6528             return ret;
6529         }
6530         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6531         if (ret == 0) {
6532             ret = copy_to_user_flock(arg, &fl64);
6533         }
6534         break;
6535 
6536     case TARGET_F_SETLK:
6537     case TARGET_F_SETLKW:
6538         ret = copy_from_user_flock(&fl64, arg);
6539         if (ret) {
6540             return ret;
6541         }
6542         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6543         break;
6544 
6545     case TARGET_F_GETLK64:
6546     case TARGET_F_OFD_GETLK:
6547         ret = copy_from_user_flock64(&fl64, arg);
6548         if (ret) {
6549             return ret;
6550         }
6551         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6552         if (ret == 0) {
6553             ret = copy_to_user_flock64(arg, &fl64);
6554         }
6555         break;
6556     case TARGET_F_SETLK64:
6557     case TARGET_F_SETLKW64:
6558     case TARGET_F_OFD_SETLK:
6559     case TARGET_F_OFD_SETLKW:
6560         ret = copy_from_user_flock64(&fl64, arg);
6561         if (ret) {
6562             return ret;
6563         }
6564         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6565         break;
6566 
6567     case TARGET_F_GETFL:
6568         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6569         if (ret >= 0) {
6570             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6571         }
6572         break;
6573 
6574     case TARGET_F_SETFL:
6575         ret = get_errno(safe_fcntl(fd, host_cmd,
6576                                    target_to_host_bitmask(arg,
6577                                                           fcntl_flags_tbl)));
6578         break;
6579 
6580 #ifdef F_GETOWN_EX
6581     case TARGET_F_GETOWN_EX:
6582         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6583         if (ret >= 0) {
6584             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6585                 return -TARGET_EFAULT;
6586             target_fox->type = tswap32(fox.type);
6587             target_fox->pid = tswap32(fox.pid);
6588             unlock_user_struct(target_fox, arg, 1);
6589         }
6590         break;
6591 #endif
6592 
6593 #ifdef F_SETOWN_EX
6594     case TARGET_F_SETOWN_EX:
6595         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6596             return -TARGET_EFAULT;
6597         fox.type = tswap32(target_fox->type);
6598         fox.pid = tswap32(target_fox->pid);
6599         unlock_user_struct(target_fox, arg, 0);
6600         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6601         break;
6602 #endif
6603 
6604     case TARGET_F_SETOWN:
6605     case TARGET_F_GETOWN:
6606     case TARGET_F_SETSIG:
6607     case TARGET_F_GETSIG:
6608     case TARGET_F_SETLEASE:
6609     case TARGET_F_GETLEASE:
6610     case TARGET_F_SETPIPE_SZ:
6611     case TARGET_F_GETPIPE_SZ:
6612         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6613         break;
6614 
6615     default:
6616         ret = get_errno(safe_fcntl(fd, cmd, arg));
6617         break;
6618     }
6619     return ret;
6620 }
6621 
6622 #ifdef USE_UID16
6623 
6624 static inline int high2lowuid(int uid)
6625 {
6626     if (uid > 65535)
6627         return 65534;
6628     else
6629         return uid;
6630 }
6631 
6632 static inline int high2lowgid(int gid)
6633 {
6634     if (gid > 65535)
6635         return 65534;
6636     else
6637         return gid;
6638 }
6639 
6640 static inline int low2highuid(int uid)
6641 {
6642     if ((int16_t)uid == -1)
6643         return -1;
6644     else
6645         return uid;
6646 }
6647 
6648 static inline int low2highgid(int gid)
6649 {
6650     if ((int16_t)gid == -1)
6651         return -1;
6652     else
6653         return gid;
6654 }
6655 static inline int tswapid(int id)
6656 {
6657     return tswap16(id);
6658 }
6659 
6660 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6661 
6662 #else /* !USE_UID16 */
6663 static inline int high2lowuid(int uid)
6664 {
6665     return uid;
6666 }
6667 static inline int high2lowgid(int gid)
6668 {
6669     return gid;
6670 }
6671 static inline int low2highuid(int uid)
6672 {
6673     return uid;
6674 }
6675 static inline int low2highgid(int gid)
6676 {
6677     return gid;
6678 }
6679 static inline int tswapid(int id)
6680 {
6681     return tswap32(id);
6682 }
6683 
6684 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6685 
6686 #endif /* USE_UID16 */
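/*
 * Worked examples of the 16-bit UID helpers (illustrative values): with
 * USE_UID16, high2lowuid(100000) returns 65534 (the kernel's overflow ID)
 * while high2lowuid(1000) is returned unchanged, and low2highuid(0xffff)
 * yields -1 so the "leave this ID unchanged" sentinel used by setresuid()
 * survives the narrow ABI.  Without USE_UID16 all four helpers are identity
 * functions.
 */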
6687 
6688 /* We must do direct syscalls for setting UID/GID, because we want to
6689  * implement the Linux system call semantics of "change only for this thread",
6690  * not the libc/POSIX semantics of "change for all threads in process".
6691  * (See http://ewontfix.com/17/ for more details.)
6692  * We use the 32-bit version of the syscalls if present; if it is not
6693  * then either the host architecture supports 32-bit UIDs natively with
6694  * the standard syscall, or the 16-bit UID is the best we can do.
6695  */
6696 #ifdef __NR_setuid32
6697 #define __NR_sys_setuid __NR_setuid32
6698 #else
6699 #define __NR_sys_setuid __NR_setuid
6700 #endif
6701 #ifdef __NR_setgid32
6702 #define __NR_sys_setgid __NR_setgid32
6703 #else
6704 #define __NR_sys_setgid __NR_setgid
6705 #endif
6706 #ifdef __NR_setresuid32
6707 #define __NR_sys_setresuid __NR_setresuid32
6708 #else
6709 #define __NR_sys_setresuid __NR_setresuid
6710 #endif
6711 #ifdef __NR_setresgid32
6712 #define __NR_sys_setresgid __NR_setresgid32
6713 #else
6714 #define __NR_sys_setresgid __NR_setresgid
6715 #endif
6716 
6717 _syscall1(int, sys_setuid, uid_t, uid)
6718 _syscall1(int, sys_setgid, gid_t, gid)
6719 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6720 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
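/*
 * Illustrative contrast (not part of the original flow): when the guest
 * performs the setuid syscall with uid 1000, QEMU issues sys_setuid(1000)
 * above, which changes the credentials of the calling host thread only;
 * glibc's own setuid() would instead broadcast the change to every thread
 * in the process, which is the behaviour the guest's libc is expected to
 * build on top of these per-thread syscalls.
 */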
6721 
6722 void syscall_init(void)
6723 {
6724     IOCTLEntry *ie;
6725     const argtype *arg_type;
6726     int size;
6727     int i;
6728 
6729     thunk_init(STRUCT_MAX);
6730 
6731 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6732 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6733 #include "syscall_types.h"
6734 #undef STRUCT
6735 #undef STRUCT_SPECIAL
6736 
6737     /* Build target_to_host_errno_table[] from
6738      * host_to_target_errno_table[]. */
6739     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6740         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6741     }
6742 
6743     /* we patch the ioctl size if necessary. We rely on the fact that
6744        no ioctl has all bits set to '1' in the size field */
6745     ie = ioctl_entries;
6746     while (ie->target_cmd != 0) {
6747         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6748             TARGET_IOC_SIZEMASK) {
6749             arg_type = ie->arg_type;
6750             if (arg_type[0] != TYPE_PTR) {
6751                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6752                         ie->target_cmd);
6753                 exit(1);
6754             }
6755             arg_type++;
6756             size = thunk_type_size(arg_type, 0);
6757             ie->target_cmd = (ie->target_cmd &
6758                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6759                 (size << TARGET_IOC_SIZESHIFT);
6760         }
6761 
6762         /* automatic consistency check if same arch */
6763 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6764     (defined(__x86_64__) && defined(TARGET_X86_64))
6765         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6766             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6767                     ie->name, ie->target_cmd, ie->host_cmd);
6768         }
6769 #endif
6770         ie++;
6771     }
6772 }
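/*
 * Sketch of the size patching done above (symbolic, for illustration): an
 * IOCTLEntry whose target_cmd was declared with TARGET_IOC_SIZEMASK in the
 * size bits ends up as
 *     target_cmd = (target_cmd & ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT))
 *                  | (thunk_type_size(arg_type, 0) << TARGET_IOC_SIZESHIFT);
 * i.e. the all-ones placeholder is replaced by the real size of the struct
 * that the ioctl's TYPE_PTR argument points to.
 */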
6773 
6774 #ifdef TARGET_NR_truncate64
6775 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6776                                          abi_long arg2,
6777                                          abi_long arg3,
6778                                          abi_long arg4)
6779 {
6780     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6781         arg2 = arg3;
6782         arg3 = arg4;
6783     }
6784     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6785 }
6786 #endif
6787 
6788 #ifdef TARGET_NR_ftruncate64
6789 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6790                                           abi_long arg2,
6791                                           abi_long arg3,
6792                                           abi_long arg4)
6793 {
6794     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6795         arg2 = arg3;
6796         arg3 = arg4;
6797     }
6798     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6799 }
6800 #endif
6801 
6802 #if defined(TARGET_NR_timer_settime) || \
6803     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6804 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6805                                                  abi_ulong target_addr)
6806 {
6807     if (target_to_host_timespec(&host_its->it_interval, target_addr +
6808                                 offsetof(struct target_itimerspec,
6809                                          it_interval)) ||
6810         target_to_host_timespec(&host_its->it_value, target_addr +
6811                                 offsetof(struct target_itimerspec,
6812                                          it_value))) {
6813         return -TARGET_EFAULT;
6814     }
6815 
6816     return 0;
6817 }
6818 #endif
6819 
6820 #if defined(TARGET_NR_timer_settime64) || \
6821     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6822 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6823                                                    abi_ulong target_addr)
6824 {
6825     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6826                                   offsetof(struct target__kernel_itimerspec,
6827                                            it_interval)) ||
6828         target_to_host_timespec64(&host_its->it_value, target_addr +
6829                                   offsetof(struct target__kernel_itimerspec,
6830                                            it_value))) {
6831         return -TARGET_EFAULT;
6832     }
6833 
6834     return 0;
6835 }
6836 #endif
6837 
6838 #if ((defined(TARGET_NR_timerfd_gettime) || \
6839       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6840       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6841 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6842                                                  struct itimerspec *host_its)
6843 {
6844     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6845                                                        it_interval),
6846                                 &host_its->it_interval) ||
6847         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6848                                                        it_value),
6849                                 &host_its->it_value)) {
6850         return -TARGET_EFAULT;
6851     }
6852     return 0;
6853 }
6854 #endif
6855 
6856 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6857       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6858       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6859 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6860                                                    struct itimerspec *host_its)
6861 {
6862     if (host_to_target_timespec64(target_addr +
6863                                   offsetof(struct target__kernel_itimerspec,
6864                                            it_interval),
6865                                   &host_its->it_interval) ||
6866         host_to_target_timespec64(target_addr +
6867                                   offsetof(struct target__kernel_itimerspec,
6868                                            it_value),
6869                                   &host_its->it_value)) {
6870         return -TARGET_EFAULT;
6871     }
6872     return 0;
6873 }
6874 #endif
6875 
6876 #if defined(TARGET_NR_adjtimex) || \
6877     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6878 static inline abi_long target_to_host_timex(struct timex *host_tx,
6879                                             abi_long target_addr)
6880 {
6881     struct target_timex *target_tx;
6882 
6883     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6884         return -TARGET_EFAULT;
6885     }
6886 
6887     __get_user(host_tx->modes, &target_tx->modes);
6888     __get_user(host_tx->offset, &target_tx->offset);
6889     __get_user(host_tx->freq, &target_tx->freq);
6890     __get_user(host_tx->maxerror, &target_tx->maxerror);
6891     __get_user(host_tx->esterror, &target_tx->esterror);
6892     __get_user(host_tx->status, &target_tx->status);
6893     __get_user(host_tx->constant, &target_tx->constant);
6894     __get_user(host_tx->precision, &target_tx->precision);
6895     __get_user(host_tx->tolerance, &target_tx->tolerance);
6896     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6897     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6898     __get_user(host_tx->tick, &target_tx->tick);
6899     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6900     __get_user(host_tx->jitter, &target_tx->jitter);
6901     __get_user(host_tx->shift, &target_tx->shift);
6902     __get_user(host_tx->stabil, &target_tx->stabil);
6903     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6904     __get_user(host_tx->calcnt, &target_tx->calcnt);
6905     __get_user(host_tx->errcnt, &target_tx->errcnt);
6906     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6907     __get_user(host_tx->tai, &target_tx->tai);
6908 
6909     unlock_user_struct(target_tx, target_addr, 0);
6910     return 0;
6911 }
6912 
6913 static inline abi_long host_to_target_timex(abi_long target_addr,
6914                                             struct timex *host_tx)
6915 {
6916     struct target_timex *target_tx;
6917 
6918     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6919         return -TARGET_EFAULT;
6920     }
6921 
6922     __put_user(host_tx->modes, &target_tx->modes);
6923     __put_user(host_tx->offset, &target_tx->offset);
6924     __put_user(host_tx->freq, &target_tx->freq);
6925     __put_user(host_tx->maxerror, &target_tx->maxerror);
6926     __put_user(host_tx->esterror, &target_tx->esterror);
6927     __put_user(host_tx->status, &target_tx->status);
6928     __put_user(host_tx->constant, &target_tx->constant);
6929     __put_user(host_tx->precision, &target_tx->precision);
6930     __put_user(host_tx->tolerance, &target_tx->tolerance);
6931     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6932     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6933     __put_user(host_tx->tick, &target_tx->tick);
6934     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6935     __put_user(host_tx->jitter, &target_tx->jitter);
6936     __put_user(host_tx->shift, &target_tx->shift);
6937     __put_user(host_tx->stabil, &target_tx->stabil);
6938     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6939     __put_user(host_tx->calcnt, &target_tx->calcnt);
6940     __put_user(host_tx->errcnt, &target_tx->errcnt);
6941     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6942     __put_user(host_tx->tai, &target_tx->tai);
6943 
6944     unlock_user_struct(target_tx, target_addr, 1);
6945     return 0;
6946 }
6947 #endif
6948 
6949 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6950                                                abi_ulong target_addr)
6951 {
6952     struct target_sigevent *target_sevp;
6953 
6954     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6955         return -TARGET_EFAULT;
6956     }
6957 
6958     /* This union is awkward on 64 bit systems because it has a 32 bit
6959      * integer and a pointer in it; we follow the conversion approach
6960      * used for handling sigval types in signal.c so the guest should get
6961      * the correct value back even if we did a 64 bit byteswap and it's
6962      * using the 32 bit integer.
6963      */
6964     host_sevp->sigev_value.sival_ptr =
6965         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6966     host_sevp->sigev_signo =
6967         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6968     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6969     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6970 
6971     unlock_user_struct(target_sevp, target_addr, 1);
6972     return 0;
6973 }
6974 
6975 #if defined(TARGET_NR_mlockall)
6976 static inline int target_to_host_mlockall_arg(int arg)
6977 {
6978     int result = 0;
6979 
6980     if (arg & TARGET_MCL_CURRENT) {
6981         result |= MCL_CURRENT;
6982     }
6983     if (arg & TARGET_MCL_FUTURE) {
6984         result |= MCL_FUTURE;
6985     }
6986 #ifdef MCL_ONFAULT
6987     if (arg & TARGET_MCL_ONFAULT) {
6988         result |= MCL_ONFAULT;
6989     }
6990 #endif
6991 
6992     return result;
6993 }
6994 #endif
6995 
6996 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6997      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6998      defined(TARGET_NR_newfstatat))
6999 static inline abi_long host_to_target_stat64(void *cpu_env,
7000                                              abi_ulong target_addr,
7001                                              struct stat *host_st)
7002 {
7003 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7004     if (((CPUARMState *)cpu_env)->eabi) {
7005         struct target_eabi_stat64 *target_st;
7006 
7007         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7008             return -TARGET_EFAULT;
7009         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7010         __put_user(host_st->st_dev, &target_st->st_dev);
7011         __put_user(host_st->st_ino, &target_st->st_ino);
7012 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7013         __put_user(host_st->st_ino, &target_st->__st_ino);
7014 #endif
7015         __put_user(host_st->st_mode, &target_st->st_mode);
7016         __put_user(host_st->st_nlink, &target_st->st_nlink);
7017         __put_user(host_st->st_uid, &target_st->st_uid);
7018         __put_user(host_st->st_gid, &target_st->st_gid);
7019         __put_user(host_st->st_rdev, &target_st->st_rdev);
7020         __put_user(host_st->st_size, &target_st->st_size);
7021         __put_user(host_st->st_blksize, &target_st->st_blksize);
7022         __put_user(host_st->st_blocks, &target_st->st_blocks);
7023         __put_user(host_st->st_atime, &target_st->target_st_atime);
7024         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7025         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7026 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7027         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7028         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7029         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7030 #endif
7031         unlock_user_struct(target_st, target_addr, 1);
7032     } else
7033 #endif
7034     {
7035 #if defined(TARGET_HAS_STRUCT_STAT64)
7036         struct target_stat64 *target_st;
7037 #else
7038         struct target_stat *target_st;
7039 #endif
7040 
7041         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7042             return -TARGET_EFAULT;
7043         memset(target_st, 0, sizeof(*target_st));
7044         __put_user(host_st->st_dev, &target_st->st_dev);
7045         __put_user(host_st->st_ino, &target_st->st_ino);
7046 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7047         __put_user(host_st->st_ino, &target_st->__st_ino);
7048 #endif
7049         __put_user(host_st->st_mode, &target_st->st_mode);
7050         __put_user(host_st->st_nlink, &target_st->st_nlink);
7051         __put_user(host_st->st_uid, &target_st->st_uid);
7052         __put_user(host_st->st_gid, &target_st->st_gid);
7053         __put_user(host_st->st_rdev, &target_st->st_rdev);
7054         /* XXX: better use of kernel struct */
7055         __put_user(host_st->st_size, &target_st->st_size);
7056         __put_user(host_st->st_blksize, &target_st->st_blksize);
7057         __put_user(host_st->st_blocks, &target_st->st_blocks);
7058         __put_user(host_st->st_atime, &target_st->target_st_atime);
7059         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7060         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7061 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7062         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7063         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7064         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7065 #endif
7066         unlock_user_struct(target_st, target_addr, 1);
7067     }
7068 
7069     return 0;
7070 }
7071 #endif
7072 
7073 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7074 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7075                                             abi_ulong target_addr)
7076 {
7077     struct target_statx *target_stx;
7078 
7079     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7080         return -TARGET_EFAULT;
7081     }
7082     memset(target_stx, 0, sizeof(*target_stx));
7083 
7084     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7085     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7086     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7087     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7088     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7089     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7090     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7091     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7092     __put_user(host_stx->stx_size, &target_stx->stx_size);
7093     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7094     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7095     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7096     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7097     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7098     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7099     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7100     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7101     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7102     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7103     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7104     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7105     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7106     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7107 
7108     unlock_user_struct(target_stx, target_addr, 1);
7109 
7110     return 0;
7111 }
7112 #endif
7113 
7114 static int do_sys_futex(int *uaddr, int op, int val,
7115                          const struct timespec *timeout, int *uaddr2,
7116                          int val3)
7117 {
7118 #if HOST_LONG_BITS == 64
7119 #if defined(__NR_futex)
7120     /* a 64-bit host always has a 64-bit time_t and defines no _time64 version */
7121     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7122 
7123 #endif
7124 #else /* HOST_LONG_BITS == 64 */
7125 #if defined(__NR_futex_time64)
7126     if (sizeof(timeout->tv_sec) == 8) {
7127         /* _time64 function on 32bit arch */
7128         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7129     }
7130 #endif
7131 #if defined(__NR_futex)
7132     /* old function on 32bit arch */
7133     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7134 #endif
7135 #endif /* HOST_LONG_BITS == 64 */
7136     g_assert_not_reached();
7137 }
7138 
7139 static int do_safe_futex(int *uaddr, int op, int val,
7140                          const struct timespec *timeout, int *uaddr2,
7141                          int val3)
7142 {
7143 #if HOST_LONG_BITS == 64
7144 #if defined(__NR_futex)
7145     /* a 64-bit host always has a 64-bit time_t and defines no _time64 version */
7146     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7147 #endif
7148 #else /* HOST_LONG_BITS == 64 */
7149 #if defined(__NR_futex_time64)
7150     if (sizeof(timeout->tv_sec) == 8) {
7151         /* _time64 function on 32bit arch */
7152         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7153                                            val3));
7154     }
7155 #endif
7156 #if defined(__NR_futex)
7157     /* old function on 32bit arch */
7158     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7159 #endif
7160 #endif /* HOST_LONG_BITS == 64 */
7161     return -TARGET_ENOSYS;
7162 }
7163 
7164 /* ??? Using host futex calls even when target atomic operations
7165    are not really atomic probably breaks things.  However, implementing
7166    futexes locally would make futexes shared between multiple processes
7167    tricky.  Then again, such futexes are probably useless because guest
7168    atomic operations won't work either.  */
7169 #if defined(TARGET_NR_futex)
7170 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7171                     target_ulong uaddr2, int val3)
7172 {
7173     struct timespec ts, *pts;
7174     int base_op;
7175 
7176     /* ??? We assume FUTEX_* constants are the same on both host
7177        and target.  */
7178 #ifdef FUTEX_CMD_MASK
7179     base_op = op & FUTEX_CMD_MASK;
7180 #else
7181     base_op = op;
7182 #endif
7183     switch (base_op) {
7184     case FUTEX_WAIT:
7185     case FUTEX_WAIT_BITSET:
7186         if (timeout) {
7187             pts = &ts;
7188             target_to_host_timespec(pts, timeout);
7189         } else {
7190             pts = NULL;
7191         }
7192         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7193     case FUTEX_WAKE:
7194         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7195     case FUTEX_FD:
7196         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7197     case FUTEX_REQUEUE:
7198     case FUTEX_CMP_REQUEUE:
7199     case FUTEX_WAKE_OP:
7200         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7201            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7202            But the prototype takes a `struct timespec *'; insert casts
7203            to satisfy the compiler.  We do not need to tswap TIMEOUT
7204            since it's not compared to guest memory.  */
7205         pts = (struct timespec *)(uintptr_t) timeout;
7206         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7207                              (base_op == FUTEX_CMP_REQUEUE
7208                                       ? tswap32(val3)
7209                                       : val3));
7210     default:
7211         return -TARGET_ENOSYS;
7212     }
7213 }
7214 #endif
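/*
 * Illustration of the requeue quirk handled above (example values): for
 * FUTEX_CMP_REQUEUE the guest's "timeout" argument is really a count, e.g.
 * "requeue at most 1 waiter", so it is passed through as
 * (struct timespec *)(uintptr_t)1 rather than dereferenced, while val3 is
 * byte-swapped because the kernel compares it against the futex word that
 * lives in guest memory.
 */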
7215 
7216 #if defined(TARGET_NR_futex_time64)
7217 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7218                            target_ulong uaddr2, int val3)
7219 {
7220     struct timespec ts, *pts;
7221     int base_op;
7222 
7223     /* ??? We assume FUTEX_* constants are the same on both host
7224        and target.  */
7225 #ifdef FUTEX_CMD_MASK
7226     base_op = op & FUTEX_CMD_MASK;
7227 #else
7228     base_op = op;
7229 #endif
7230     switch (base_op) {
7231     case FUTEX_WAIT:
7232     case FUTEX_WAIT_BITSET:
7233         if (timeout) {
7234             pts = &ts;
7235             target_to_host_timespec64(pts, timeout);
7236         } else {
7237             pts = NULL;
7238         }
7239         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7240     case FUTEX_WAKE:
7241         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7242     case FUTEX_FD:
7243         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7244     case FUTEX_REQUEUE:
7245     case FUTEX_CMP_REQUEUE:
7246     case FUTEX_WAKE_OP:
7247         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7248            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7249            But the prototype takes a `struct timespec *'; insert casts
7250            to satisfy the compiler.  We do not need to tswap TIMEOUT
7251            since it's not compared to guest memory.  */
7252         pts = (struct timespec *)(uintptr_t) timeout;
7253         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7254                              (base_op == FUTEX_CMP_REQUEUE
7255                                       ? tswap32(val3)
7256                                       : val3));
7257     default:
7258         return -TARGET_ENOSYS;
7259     }
7260 }
7261 #endif
7262 
7263 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7264 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7265                                      abi_long handle, abi_long mount_id,
7266                                      abi_long flags)
7267 {
7268     struct file_handle *target_fh;
7269     struct file_handle *fh;
7270     int mid = 0;
7271     abi_long ret;
7272     char *name;
7273     unsigned int size, total_size;
7274 
7275     if (get_user_s32(size, handle)) {
7276         return -TARGET_EFAULT;
7277     }
7278 
7279     name = lock_user_string(pathname);
7280     if (!name) {
7281         return -TARGET_EFAULT;
7282     }
7283 
7284     total_size = sizeof(struct file_handle) + size;
7285     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7286     if (!target_fh) {
7287         unlock_user(name, pathname, 0);
7288         return -TARGET_EFAULT;
7289     }
7290 
7291     fh = g_malloc0(total_size);
7292     fh->handle_bytes = size;
7293 
7294     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7295     unlock_user(name, pathname, 0);
7296 
7297     /* man name_to_handle_at(2):
7298      * Other than the use of the handle_bytes field, the caller should treat
7299      * the file_handle structure as an opaque data type
7300      */
7301 
7302     memcpy(target_fh, fh, total_size);
7303     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7304     target_fh->handle_type = tswap32(fh->handle_type);
7305     g_free(fh);
7306     unlock_user(target_fh, handle, total_size);
7307 
7308     if (put_user_s32(mid, mount_id)) {
7309         return -TARGET_EFAULT;
7310     }
7311 
7312     return ret;
7313 
7314 }
7315 #endif
7316 
7317 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7318 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7319                                      abi_long flags)
7320 {
7321     struct file_handle *target_fh;
7322     struct file_handle *fh;
7323     unsigned int size, total_size;
7324     abi_long ret;
7325 
7326     if (get_user_s32(size, handle)) {
7327         return -TARGET_EFAULT;
7328     }
7329 
7330     total_size = sizeof(struct file_handle) + size;
7331     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7332     if (!target_fh) {
7333         return -TARGET_EFAULT;
7334     }
7335 
7336     fh = g_memdup(target_fh, total_size);
7337     fh->handle_bytes = size;
7338     fh->handle_type = tswap32(target_fh->handle_type);
7339 
7340     ret = get_errno(open_by_handle_at(mount_fd, fh,
7341                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7342 
7343     g_free(fh);
7344 
7345     unlock_user(target_fh, handle, total_size);
7346 
7347     return ret;
7348 }
7349 #endif
7350 
7351 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7352 
7353 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7354 {
7355     int host_flags;
7356     target_sigset_t *target_mask;
7357     sigset_t host_mask;
7358     abi_long ret;
7359 
7360     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7361         return -TARGET_EINVAL;
7362     }
7363     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7364         return -TARGET_EFAULT;
7365     }
7366 
7367     target_to_host_sigset(&host_mask, target_mask);
7368 
7369     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7370 
7371     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7372     if (ret >= 0) {
7373         fd_trans_register(ret, &target_signalfd_trans);
7374     }
7375 
7376     unlock_user_struct(target_mask, mask, 0);
7377 
7378     return ret;
7379 }
7380 #endif
7381 
7382 /* Map host to target signal numbers for the wait family of syscalls.
7383    Assume all other status bits are the same.  */
7384 int host_to_target_waitstatus(int status)
7385 {
7386     if (WIFSIGNALED(status)) {
7387         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7388     }
7389     if (WIFSTOPPED(status)) {
7390         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7391                | (status & 0xff);
7392     }
7393     return status;
7394 }
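/*
 * Worked example (hypothetical host status values): a child killed by
 * SIGSEGV with a core dump reports status 0x8b (signal 11 plus the 0x80
 * core flag); the low 7 bits are remapped with host_to_target_signal()
 * and the core flag is preserved by the "status & ~0x7f" term.  A stopped
 * child reports (sig << 8) | 0x7f, so only the byte holding the stop
 * signal is translated.
 */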
7395 
7396 static int open_self_cmdline(void *cpu_env, int fd)
7397 {
7398     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7399     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7400     int i;
7401 
7402     for (i = 0; i < bprm->argc; i++) {
7403         size_t len = strlen(bprm->argv[i]) + 1;
7404 
7405         if (write(fd, bprm->argv[i], len) != len) {
7406             return -1;
7407         }
7408     }
7409 
7410     return 0;
7411 }
7412 
7413 static int open_self_maps(void *cpu_env, int fd)
7414 {
7415     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7416     TaskState *ts = cpu->opaque;
7417     GSList *map_info = read_self_maps();
7418     GSList *s;
7419     int count;
7420 
7421     for (s = map_info; s; s = g_slist_next(s)) {
7422         MapInfo *e = (MapInfo *) s->data;
7423 
7424         if (h2g_valid(e->start)) {
7425             unsigned long min = e->start;
7426             unsigned long max = e->end;
7427             int flags = page_get_flags(h2g(min));
7428             const char *path;
7429 
7430             max = h2g_valid(max - 1) ?
7431                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7432 
7433             if (page_check_range(h2g(min), max - min, flags) == -1) {
7434                 continue;
7435             }
7436 
7437             if (h2g(min) == ts->info->stack_limit) {
7438                 path = "[stack]";
7439             } else {
7440                 path = e->path;
7441             }
7442 
7443             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7444                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7445                             h2g(min), h2g(max - 1) + 1,
7446                             e->is_read ? 'r' : '-',
7447                             e->is_write ? 'w' : '-',
7448                             e->is_exec ? 'x' : '-',
7449                             e->is_priv ? 'p' : '-',
7450                             (uint64_t) e->offset, e->dev, e->inode);
7451             if (path) {
7452                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7453             } else {
7454                 dprintf(fd, "\n");
7455             }
7456         }
7457     }
7458 
7459     free_self_maps(map_info);
7460 
7461 #ifdef TARGET_VSYSCALL_PAGE
7462     /*
7463      * We only support execution from the vsyscall page.
7464      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7465      */
7466     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7467                     " --xp 00000000 00:00 0",
7468                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7469     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7470 #endif
7471 
7472     return 0;
7473 }
7474 
7475 static int open_self_stat(void *cpu_env, int fd)
7476 {
7477     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7478     TaskState *ts = cpu->opaque;
7479     g_autoptr(GString) buf = g_string_new(NULL);
7480     int i;
7481 
7482     for (i = 0; i < 44; i++) {
7483         if (i == 0) {
7484             /* pid */
7485             g_string_printf(buf, FMT_pid " ", getpid());
7486         } else if (i == 1) {
7487             /* app name */
7488             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7489             bin = bin ? bin + 1 : ts->bprm->argv[0];
7490             g_string_printf(buf, "(%.15s) ", bin);
7491         } else if (i == 27) {
7492             /* stack bottom */
7493             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7494         } else {
7495             /* for the rest, there is MasterCard (i.e. just print 0) */
7496             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7497         }
7498 
7499         if (write(fd, buf->str, buf->len) != buf->len) {
7500             return -1;
7501         }
7502     }
7503 
7504     return 0;
7505 }
7506 
7507 static int open_self_auxv(void *cpu_env, int fd)
7508 {
7509     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7510     TaskState *ts = cpu->opaque;
7511     abi_ulong auxv = ts->info->saved_auxv;
7512     abi_ulong len = ts->info->auxv_len;
7513     char *ptr;
7514 
7515      * The auxiliary vector is stored on the target process stack.
7516      * Read in the whole auxv vector and copy it to the file.
7517      * read in whole auxv vector and copy it to file
7518      */
7519     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7520     if (ptr != NULL) {
7521         while (len > 0) {
7522             ssize_t r;
7523             r = write(fd, ptr, len);
7524             if (r <= 0) {
7525                 break;
7526             }
7527             len -= r;
7528             ptr += r;
7529         }
7530         lseek(fd, 0, SEEK_SET);
7531         unlock_user(ptr, auxv, len);
7532     }
7533 
7534     return 0;
7535 }
7536 
7537 static int is_proc_myself(const char *filename, const char *entry)
7538 {
7539     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7540         filename += strlen("/proc/");
7541         if (!strncmp(filename, "self/", strlen("self/"))) {
7542             filename += strlen("self/");
7543         } else if (*filename >= '1' && *filename <= '9') {
7544             char myself[80];
7545             snprintf(myself, sizeof(myself), "%d/", getpid());
7546             if (!strncmp(filename, myself, strlen(myself))) {
7547                 filename += strlen(myself);
7548             } else {
7549                 return 0;
7550             }
7551         } else {
7552             return 0;
7553         }
7554         if (!strcmp(filename, entry)) {
7555             return 1;
7556         }
7557     }
7558     return 0;
7559 }
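/*
 * Examples (illustrative paths): is_proc_myself("/proc/self/maps", "maps")
 * and, for a process whose getpid() is 1234, is_proc_myself("/proc/1234/maps",
 * "maps") both return 1, whereas "/proc/999/maps" under any other PID and
 * plain "maps" (no /proc/ prefix) return 0.
 */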
7560 
7561 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7562     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7563 static int is_proc(const char *filename, const char *entry)
7564 {
7565     return strcmp(filename, entry) == 0;
7566 }
7567 #endif
7568 
7569 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7570 static int open_net_route(void *cpu_env, int fd)
7571 {
7572     FILE *fp;
7573     char *line = NULL;
7574     size_t len = 0;
7575     ssize_t read;
7576 
7577     fp = fopen("/proc/net/route", "r");
7578     if (fp == NULL) {
7579         return -1;
7580     }
7581 
7582     /* read header */
7583 
7584     read = getline(&line, &len, fp);
7585     dprintf(fd, "%s", line);
7586 
7587     /* read routes */
7588 
7589     while ((read = getline(&line, &len, fp)) != -1) {
7590         char iface[16];
7591         uint32_t dest, gw, mask;
7592         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7593         int fields;
7594 
7595         fields = sscanf(line,
7596                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7597                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7598                         &mask, &mtu, &window, &irtt);
7599         if (fields != 11) {
7600             continue;
7601         }
7602         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7603                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7604                 metric, tswap32(mask), mtu, window, irtt);
7605     }
7606 
7607     free(line);
7608     fclose(fp);
7609 
7610     return 0;
7611 }
7612 #endif
7613 
7614 #if defined(TARGET_SPARC)
7615 static int open_cpuinfo(void *cpu_env, int fd)
7616 {
7617     dprintf(fd, "type\t\t: sun4u\n");
7618     return 0;
7619 }
7620 #endif
7621 
7622 #if defined(TARGET_HPPA)
7623 static int open_cpuinfo(void *cpu_env, int fd)
7624 {
7625     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7626     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7627     dprintf(fd, "capabilities\t: os32\n");
7628     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7629     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7630     return 0;
7631 }
7632 #endif
7633 
7634 #if defined(TARGET_M68K)
7635 static int open_hardware(void *cpu_env, int fd)
7636 {
7637     dprintf(fd, "Model:\t\tqemu-m68k\n");
7638     return 0;
7639 }
7640 #endif
7641 
7642 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7643 {
7644     struct fake_open {
7645         const char *filename;
7646         int (*fill)(void *cpu_env, int fd);
7647         int (*cmp)(const char *s1, const char *s2);
7648     };
7649     const struct fake_open *fake_open;
7650     static const struct fake_open fakes[] = {
7651         { "maps", open_self_maps, is_proc_myself },
7652         { "stat", open_self_stat, is_proc_myself },
7653         { "auxv", open_self_auxv, is_proc_myself },
7654         { "cmdline", open_self_cmdline, is_proc_myself },
7655 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7656         { "/proc/net/route", open_net_route, is_proc },
7657 #endif
7658 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7659         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7660 #endif
7661 #if defined(TARGET_M68K)
7662         { "/proc/hardware", open_hardware, is_proc },
7663 #endif
7664         { NULL, NULL, NULL }
7665     };
7666 
7667     if (is_proc_myself(pathname, "exe")) {
7668         int execfd = qemu_getauxval(AT_EXECFD);
7669         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7670     }
7671 
7672     for (fake_open = fakes; fake_open->filename; fake_open++) {
7673         if (fake_open->cmp(pathname, fake_open->filename)) {
7674             break;
7675         }
7676     }
7677 
7678     if (fake_open->filename) {
7679         const char *tmpdir;
7680         char filename[PATH_MAX];
7681         int fd, r;
7682 
7683         /* create a temporary file to hold the faked contents */
7684         tmpdir = getenv("TMPDIR");
7685         if (!tmpdir)
7686             tmpdir = "/tmp";
7687         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7688         fd = mkstemp(filename);
7689         if (fd < 0) {
7690             return fd;
7691         }
7692         unlink(filename);
7693 
7694         if ((r = fake_open->fill(cpu_env, fd))) {
7695             int e = errno;
7696             close(fd);
7697             errno = e;
7698             return r;
7699         }
7700         lseek(fd, 0, SEEK_SET);
7701 
7702         return fd;
7703     }
7704 
7705     return safe_openat(dirfd, path(pathname), flags, mode);
7706 }
7707 
7708 #define TIMER_MAGIC 0x0caf0000
7709 #define TIMER_MAGIC_MASK 0xffff0000
7710 
7711 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
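/* For example, a guest-visible ID of 0x0caf0003 maps back to index 3. */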
7712 static target_timer_t get_timer_id(abi_long arg)
7713 {
7714     target_timer_t timerid = arg;
7715 
7716     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7717         return -TARGET_EINVAL;
7718     }
7719 
7720     timerid &= 0xffff;
7721 
7722     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7723         return -TARGET_EINVAL;
7724     }
7725 
7726     return timerid;
7727 }
7728 
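/*
 * Copy a CPU affinity mask from the guest's abi_ulong representation
 * into the host's unsigned long representation, bit by bit, so that
 * differences in word size between target and host do not matter.
 */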
7729 static int target_to_host_cpu_mask(unsigned long *host_mask,
7730                                    size_t host_size,
7731                                    abi_ulong target_addr,
7732                                    size_t target_size)
7733 {
7734     unsigned target_bits = sizeof(abi_ulong) * 8;
7735     unsigned host_bits = sizeof(*host_mask) * 8;
7736     abi_ulong *target_mask;
7737     unsigned i, j;
7738 
7739     assert(host_size >= target_size);
7740 
7741     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7742     if (!target_mask) {
7743         return -TARGET_EFAULT;
7744     }
7745     memset(host_mask, 0, host_size);
7746 
7747     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7748         unsigned bit = i * target_bits;
7749         abi_ulong val;
7750 
7751         __get_user(val, &target_mask[i]);
7752         for (j = 0; j < target_bits; j++, bit++) {
7753             if (val & (1UL << j)) {
7754                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7755             }
7756         }
7757     }
7758 
7759     unlock_user(target_mask, target_addr, 0);
7760     return 0;
7761 }
7762 
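/*
 * Inverse of target_to_host_cpu_mask(): copy a host CPU affinity mask
 * back into the guest's abi_ulong representation.
 */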
7763 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7764                                    size_t host_size,
7765                                    abi_ulong target_addr,
7766                                    size_t target_size)
7767 {
7768     unsigned target_bits = sizeof(abi_ulong) * 8;
7769     unsigned host_bits = sizeof(*host_mask) * 8;
7770     abi_ulong *target_mask;
7771     unsigned i, j;
7772 
7773     assert(host_size >= target_size);
7774 
7775     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7776     if (!target_mask) {
7777         return -TARGET_EFAULT;
7778     }
7779 
7780     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7781         unsigned bit = i * target_bits;
7782         abi_ulong val = 0;
7783 
7784         for (j = 0; j < target_bits; j++, bit++) {
7785             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7786                 val |= 1UL << j;
7787             }
7788         }
7789         __put_user(val, &target_mask[i]);
7790     }
7791 
7792     unlock_user(target_mask, target_addr, target_size);
7793     return 0;
7794 }
7795 
7796 /* This is an internal helper for do_syscall so that it is easier
7797  * to have a single return point, allowing actions such as logging
7798  * of syscall results to be performed in one place.
7799  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7800  */
7801 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7802                             abi_long arg2, abi_long arg3, abi_long arg4,
7803                             abi_long arg5, abi_long arg6, abi_long arg7,
7804                             abi_long arg8)
7805 {
7806     CPUState *cpu = env_cpu(cpu_env);
7807     abi_long ret;
7808 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7809     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7810     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7811     || defined(TARGET_NR_statx)
7812     struct stat st;
7813 #endif
7814 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7815     || defined(TARGET_NR_fstatfs)
7816     struct statfs stfs;
7817 #endif
7818     void *p;
7819 
7820     switch(num) {
7821     case TARGET_NR_exit:
7822         /* In old applications this may be used to implement _exit(2).
7823            However in threaded applications it is used for thread termination,
7824            and _exit_group is used for application termination.
7825            Do thread termination if we have more than one thread.  */
7826 
7827         if (block_signals()) {
7828             return -TARGET_ERESTARTSYS;
7829         }
7830 
7831         pthread_mutex_lock(&clone_lock);
7832 
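        /* More than one CPU in the list means more than one guest
         * thread, so terminate only this thread rather than the
         * whole process. */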
7833         if (CPU_NEXT(first_cpu)) {
7834             TaskState *ts = cpu->opaque;
7835 
7836             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7837             object_unref(OBJECT(cpu));
7838             /*
7839              * At this point the CPU should be unrealized and removed
7840              * from cpu lists. We can clean up the rest of the thread
7841              * data without the lock held.
7842              */
7843 
7844             pthread_mutex_unlock(&clone_lock);
7845 
7846             if (ts->child_tidptr) {
7847                 put_user_u32(0, ts->child_tidptr);
7848                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7849                           NULL, NULL, 0);
7850             }
7851             thread_cpu = NULL;
7852             g_free(ts);
7853             rcu_unregister_thread();
7854             pthread_exit(NULL);
7855         }
7856 
7857         pthread_mutex_unlock(&clone_lock);
7858         preexit_cleanup(cpu_env, arg1);
7859         _exit(arg1);
7860         return 0; /* avoid warning */
7861     case TARGET_NR_read:
7862         if (arg2 == 0 && arg3 == 0) {
7863             return get_errno(safe_read(arg1, 0, 0));
7864         } else {
7865             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7866                 return -TARGET_EFAULT;
7867             ret = get_errno(safe_read(arg1, p, arg3));
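            /* Let any translator registered for this fd convert the
             * data we just read into the guest's expected layout. */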
7868             if (ret >= 0 &&
7869                 fd_trans_host_to_target_data(arg1)) {
7870                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7871             }
7872             unlock_user(p, arg2, ret);
7873         }
7874         return ret;
7875     case TARGET_NR_write:
7876         if (arg2 == 0 && arg3 == 0) {
7877             return get_errno(safe_write(arg1, 0, 0));
7878         }
7879         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7880             return -TARGET_EFAULT;
7881         if (fd_trans_target_to_host_data(arg1)) {
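        /* If a translator is registered for this fd, convert the guest
         * data to host layout in a scratch buffer before writing. */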
7882             void *copy = g_malloc(arg3);
7883             memcpy(copy, p, arg3);
7884             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7885             if (ret >= 0) {
7886                 ret = get_errno(safe_write(arg1, copy, ret));
7887             }
7888             g_free(copy);
7889         } else {
7890             ret = get_errno(safe_write(arg1, p, arg3));
7891         }
7892         unlock_user(p, arg2, 0);
7893         return ret;
7894 
7895 #ifdef TARGET_NR_open
7896     case TARGET_NR_open:
7897         if (!(p = lock_user_string(arg1)))
7898             return -TARGET_EFAULT;
7899         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7900                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7901                                   arg3));
7902         fd_trans_unregister(ret);
7903         unlock_user(p, arg1, 0);
7904         return ret;
7905 #endif
7906     case TARGET_NR_openat:
7907         if (!(p = lock_user_string(arg2)))
7908             return -TARGET_EFAULT;
7909         ret = get_errno(do_openat(cpu_env, arg1, p,
7910                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7911                                   arg4));
7912         fd_trans_unregister(ret);
7913         unlock_user(p, arg2, 0);
7914         return ret;
7915 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7916     case TARGET_NR_name_to_handle_at:
7917         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7918         return ret;
7919 #endif
7920 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7921     case TARGET_NR_open_by_handle_at:
7922         ret = do_open_by_handle_at(arg1, arg2, arg3);
7923         fd_trans_unregister(ret);
7924         return ret;
7925 #endif
7926     case TARGET_NR_close:
7927         fd_trans_unregister(arg1);
7928         return get_errno(close(arg1));
7929 
7930     case TARGET_NR_brk:
7931         return do_brk(arg1);
7932 #ifdef TARGET_NR_fork
7933     case TARGET_NR_fork:
7934         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7935 #endif
7936 #ifdef TARGET_NR_waitpid
7937     case TARGET_NR_waitpid:
7938         {
7939             int status;
7940             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7941             if (!is_error(ret) && arg2 && ret
7942                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7943                 return -TARGET_EFAULT;
7944         }
7945         return ret;
7946 #endif
7947 #ifdef TARGET_NR_waitid
7948     case TARGET_NR_waitid:
7949         {
7950             siginfo_t info;
7951             info.si_pid = 0;
7952             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7953             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7954                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7955                     return -TARGET_EFAULT;
7956                 host_to_target_siginfo(p, &info);
7957                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7958             }
7959         }
7960         return ret;
7961 #endif
7962 #ifdef TARGET_NR_creat /* not on alpha */
7963     case TARGET_NR_creat:
7964         if (!(p = lock_user_string(arg1)))
7965             return -TARGET_EFAULT;
7966         ret = get_errno(creat(p, arg2));
7967         fd_trans_unregister(ret);
7968         unlock_user(p, arg1, 0);
7969         return ret;
7970 #endif
7971 #ifdef TARGET_NR_link
7972     case TARGET_NR_link:
7973         {
7974             void * p2;
7975             p = lock_user_string(arg1);
7976             p2 = lock_user_string(arg2);
7977             if (!p || !p2)
7978                 ret = -TARGET_EFAULT;
7979             else
7980                 ret = get_errno(link(p, p2));
7981             unlock_user(p2, arg2, 0);
7982             unlock_user(p, arg1, 0);
7983         }
7984         return ret;
7985 #endif
7986 #if defined(TARGET_NR_linkat)
7987     case TARGET_NR_linkat:
7988         {
7989             void * p2 = NULL;
7990             if (!arg2 || !arg4)
7991                 return -TARGET_EFAULT;
7992             p  = lock_user_string(arg2);
7993             p2 = lock_user_string(arg4);
7994             if (!p || !p2)
7995                 ret = -TARGET_EFAULT;
7996             else
7997                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7998             unlock_user(p, arg2, 0);
7999             unlock_user(p2, arg4, 0);
8000         }
8001         return ret;
8002 #endif
8003 #ifdef TARGET_NR_unlink
8004     case TARGET_NR_unlink:
8005         if (!(p = lock_user_string(arg1)))
8006             return -TARGET_EFAULT;
8007         ret = get_errno(unlink(p));
8008         unlock_user(p, arg1, 0);
8009         return ret;
8010 #endif
8011 #if defined(TARGET_NR_unlinkat)
8012     case TARGET_NR_unlinkat:
8013         if (!(p = lock_user_string(arg2)))
8014             return -TARGET_EFAULT;
8015         ret = get_errno(unlinkat(arg1, p, arg3));
8016         unlock_user(p, arg2, 0);
8017         return ret;
8018 #endif
8019     case TARGET_NR_execve:
8020         {
8021             char **argp, **envp;
8022             int argc, envc;
8023             abi_ulong gp;
8024             abi_ulong guest_argp;
8025             abi_ulong guest_envp;
8026             abi_ulong addr;
8027             char **q;
8028             int total_size = 0;
8029 
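            /* Count the guest argv and envp entries so that the host
             * arrays can be sized before the strings are copied in. */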
8030             argc = 0;
8031             guest_argp = arg2;
8032             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8033                 if (get_user_ual(addr, gp))
8034                     return -TARGET_EFAULT;
8035                 if (!addr)
8036                     break;
8037                 argc++;
8038             }
8039             envc = 0;
8040             guest_envp = arg3;
8041             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8042                 if (get_user_ual(addr, gp))
8043                     return -TARGET_EFAULT;
8044                 if (!addr)
8045                     break;
8046                 envc++;
8047             }
8048 
8049             argp = g_new0(char *, argc + 1);
8050             envp = g_new0(char *, envc + 1);
8051 
8052             for (gp = guest_argp, q = argp; gp;
8053                   gp += sizeof(abi_ulong), q++) {
8054                 if (get_user_ual(addr, gp))
8055                     goto execve_efault;
8056                 if (!addr)
8057                     break;
8058                 if (!(*q = lock_user_string(addr)))
8059                     goto execve_efault;
8060                 total_size += strlen(*q) + 1;
8061             }
8062             *q = NULL;
8063 
8064             for (gp = guest_envp, q = envp; gp;
8065                   gp += sizeof(abi_ulong), q++) {
8066                 if (get_user_ual(addr, gp))
8067                     goto execve_efault;
8068                 if (!addr)
8069                     break;
8070                 if (!(*q = lock_user_string(addr)))
8071                     goto execve_efault;
8072                 total_size += strlen(*q) + 1;
8073             }
8074             *q = NULL;
8075 
8076             if (!(p = lock_user_string(arg1)))
8077                 goto execve_efault;
8078             /* Although execve() is not an interruptible syscall it is
8079              * a special case where we must use the safe_syscall wrapper:
8080              * if we allow a signal to happen before we make the host
8081              * syscall then we will 'lose' it, because at the point of
8082              * execve the process leaves QEMU's control. So we use the
8083              * safe syscall wrapper to ensure that we either take the
8084              * signal as a guest signal, or else it does not happen
8085              * before the execve completes and makes it the other
8086              * program's problem.
8087              */
8088             ret = get_errno(safe_execve(p, argp, envp));
8089             unlock_user(p, arg1, 0);
8090 
8091             goto execve_end;
8092 
8093         execve_efault:
8094             ret = -TARGET_EFAULT;
8095 
8096         execve_end:
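            /* Unlock every guest string that was locked above. */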
8097             for (gp = guest_argp, q = argp; *q;
8098                   gp += sizeof(abi_ulong), q++) {
8099                 if (get_user_ual(addr, gp)
8100                     || !addr)
8101                     break;
8102                 unlock_user(*q, addr, 0);
8103             }
8104             for (gp = guest_envp, q = envp; *q;
8105                   gp += sizeof(abi_ulong), q++) {
8106                 if (get_user_ual(addr, gp)
8107                     || !addr)
8108                     break;
8109                 unlock_user(*q, addr, 0);
8110             }
8111 
8112             g_free(argp);
8113             g_free(envp);
8114         }
8115         return ret;
8116     case TARGET_NR_chdir:
8117         if (!(p = lock_user_string(arg1)))
8118             return -TARGET_EFAULT;
8119         ret = get_errno(chdir(p));
8120         unlock_user(p, arg1, 0);
8121         return ret;
8122 #ifdef TARGET_NR_time
8123     case TARGET_NR_time:
8124         {
8125             time_t host_time;
8126             ret = get_errno(time(&host_time));
8127             if (!is_error(ret)
8128                 && arg1
8129                 && put_user_sal(host_time, arg1))
8130                 return -TARGET_EFAULT;
8131         }
8132         return ret;
8133 #endif
8134 #ifdef TARGET_NR_mknod
8135     case TARGET_NR_mknod:
8136         if (!(p = lock_user_string(arg1)))
8137             return -TARGET_EFAULT;
8138         ret = get_errno(mknod(p, arg2, arg3));
8139         unlock_user(p, arg1, 0);
8140         return ret;
8141 #endif
8142 #if defined(TARGET_NR_mknodat)
8143     case TARGET_NR_mknodat:
8144         if (!(p = lock_user_string(arg2)))
8145             return -TARGET_EFAULT;
8146         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8147         unlock_user(p, arg2, 0);
8148         return ret;
8149 #endif
8150 #ifdef TARGET_NR_chmod
8151     case TARGET_NR_chmod:
8152         if (!(p = lock_user_string(arg1)))
8153             return -TARGET_EFAULT;
8154         ret = get_errno(chmod(p, arg2));
8155         unlock_user(p, arg1, 0);
8156         return ret;
8157 #endif
8158 #ifdef TARGET_NR_lseek
8159     case TARGET_NR_lseek:
8160         return get_errno(lseek(arg1, arg2, arg3));
8161 #endif
8162 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8163     /* Alpha specific */
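    /* getxpid returns the pid and additionally stores the parent pid
     * in the a4 register. */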
8164     case TARGET_NR_getxpid:
8165         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8166         return get_errno(getpid());
8167 #endif
8168 #ifdef TARGET_NR_getpid
8169     case TARGET_NR_getpid:
8170         return get_errno(getpid());
8171 #endif
8172     case TARGET_NR_mount:
8173         {
8174             /* need to look at the data field */
8175             void *p2, *p3;
8176 
8177             if (arg1) {
8178                 p = lock_user_string(arg1);
8179                 if (!p) {
8180                     return -TARGET_EFAULT;
8181                 }
8182             } else {
8183                 p = NULL;
8184             }
8185 
8186             p2 = lock_user_string(arg2);
8187             if (!p2) {
8188                 if (arg1) {
8189                     unlock_user(p, arg1, 0);
8190                 }
8191                 return -TARGET_EFAULT;
8192             }
8193 
8194             if (arg3) {
8195                 p3 = lock_user_string(arg3);
8196                 if (!p3) {
8197                     if (arg1) {
8198                         unlock_user(p, arg1, 0);
8199                     }
8200                     unlock_user(p2, arg2, 0);
8201                     return -TARGET_EFAULT;
8202                 }
8203             } else {
8204                 p3 = NULL;
8205             }
8206 
8207             /* FIXME - arg5 should be locked, but it isn't clear how to
8208              * do that since it's not guaranteed to be a NULL-terminated
8209              * string.
8210              */
8211             if (!arg5) {
8212                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8213             } else {
8214                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8215             }
8216             ret = get_errno(ret);
8217 
8218             if (arg1) {
8219                 unlock_user(p, arg1, 0);
8220             }
8221             unlock_user(p2, arg2, 0);
8222             if (arg3) {
8223                 unlock_user(p3, arg3, 0);
8224             }
8225         }
8226         return ret;
8227 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8228 #if defined(TARGET_NR_umount)
8229     case TARGET_NR_umount:
8230 #endif
8231 #if defined(TARGET_NR_oldumount)
8232     case TARGET_NR_oldumount:
8233 #endif
8234         if (!(p = lock_user_string(arg1)))
8235             return -TARGET_EFAULT;
8236         ret = get_errno(umount(p));
8237         unlock_user(p, arg1, 0);
8238         return ret;
8239 #endif
8240 #ifdef TARGET_NR_stime /* not on alpha */
8241     case TARGET_NR_stime:
8242         {
8243             struct timespec ts;
8244             ts.tv_nsec = 0;
8245             if (get_user_sal(ts.tv_sec, arg1)) {
8246                 return -TARGET_EFAULT;
8247             }
8248             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8249         }
8250 #endif
8251 #ifdef TARGET_NR_alarm /* not on alpha */
8252     case TARGET_NR_alarm:
8253         return alarm(arg1);
8254 #endif
8255 #ifdef TARGET_NR_pause /* not on alpha */
8256     case TARGET_NR_pause:
8257         if (!block_signals()) {
8258             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8259         }
8260         return -TARGET_EINTR;
8261 #endif
8262 #ifdef TARGET_NR_utime
8263     case TARGET_NR_utime:
8264         {
8265             struct utimbuf tbuf, *host_tbuf;
8266             struct target_utimbuf *target_tbuf;
8267             if (arg2) {
8268                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8269                     return -TARGET_EFAULT;
8270                 tbuf.actime = tswapal(target_tbuf->actime);
8271                 tbuf.modtime = tswapal(target_tbuf->modtime);
8272                 unlock_user_struct(target_tbuf, arg2, 0);
8273                 host_tbuf = &tbuf;
8274             } else {
8275                 host_tbuf = NULL;
8276             }
8277             if (!(p = lock_user_string(arg1)))
8278                 return -TARGET_EFAULT;
8279             ret = get_errno(utime(p, host_tbuf));
8280             unlock_user(p, arg1, 0);
8281         }
8282         return ret;
8283 #endif
8284 #ifdef TARGET_NR_utimes
8285     case TARGET_NR_utimes:
8286         {
8287             struct timeval *tvp, tv[2];
8288             if (arg2) {
8289                 if (copy_from_user_timeval(&tv[0], arg2)
8290                     || copy_from_user_timeval(&tv[1],
8291                                               arg2 + sizeof(struct target_timeval)))
8292                     return -TARGET_EFAULT;
8293                 tvp = tv;
8294             } else {
8295                 tvp = NULL;
8296             }
8297             if (!(p = lock_user_string(arg1)))
8298                 return -TARGET_EFAULT;
8299             ret = get_errno(utimes(p, tvp));
8300             unlock_user(p, arg1, 0);
8301         }
8302         return ret;
8303 #endif
8304 #if defined(TARGET_NR_futimesat)
8305     case TARGET_NR_futimesat:
8306         {
8307             struct timeval *tvp, tv[2];
8308             if (arg3) {
8309                 if (copy_from_user_timeval(&tv[0], arg3)
8310                     || copy_from_user_timeval(&tv[1],
8311                                               arg3 + sizeof(struct target_timeval)))
8312                     return -TARGET_EFAULT;
8313                 tvp = tv;
8314             } else {
8315                 tvp = NULL;
8316             }
8317             if (!(p = lock_user_string(arg2))) {
8318                 return -TARGET_EFAULT;
8319             }
8320             ret = get_errno(futimesat(arg1, path(p), tvp));
8321             unlock_user(p, arg2, 0);
8322         }
8323         return ret;
8324 #endif
8325 #ifdef TARGET_NR_access
8326     case TARGET_NR_access:
8327         if (!(p = lock_user_string(arg1))) {
8328             return -TARGET_EFAULT;
8329         }
8330         ret = get_errno(access(path(p), arg2));
8331         unlock_user(p, arg1, 0);
8332         return ret;
8333 #endif
8334 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8335     case TARGET_NR_faccessat:
8336         if (!(p = lock_user_string(arg2))) {
8337             return -TARGET_EFAULT;
8338         }
8339         ret = get_errno(faccessat(arg1, p, arg3, 0));
8340         unlock_user(p, arg2, 0);
8341         return ret;
8342 #endif
8343 #ifdef TARGET_NR_nice /* not on alpha */
8344     case TARGET_NR_nice:
8345         return get_errno(nice(arg1));
8346 #endif
8347     case TARGET_NR_sync:
8348         sync();
8349         return 0;
8350 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8351     case TARGET_NR_syncfs:
8352         return get_errno(syncfs(arg1));
8353 #endif
8354     case TARGET_NR_kill:
8355         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8356 #ifdef TARGET_NR_rename
8357     case TARGET_NR_rename:
8358         {
8359             void *p2;
8360             p = lock_user_string(arg1);
8361             p2 = lock_user_string(arg2);
8362             if (!p || !p2)
8363                 ret = -TARGET_EFAULT;
8364             else
8365                 ret = get_errno(rename(p, p2));
8366             unlock_user(p2, arg2, 0);
8367             unlock_user(p, arg1, 0);
8368         }
8369         return ret;
8370 #endif
8371 #if defined(TARGET_NR_renameat)
8372     case TARGET_NR_renameat:
8373         {
8374             void *p2;
8375             p  = lock_user_string(arg2);
8376             p2 = lock_user_string(arg4);
8377             if (!p || !p2)
8378                 ret = -TARGET_EFAULT;
8379             else
8380                 ret = get_errno(renameat(arg1, p, arg3, p2));
8381             unlock_user(p2, arg4, 0);
8382             unlock_user(p, arg2, 0);
8383         }
8384         return ret;
8385 #endif
8386 #if defined(TARGET_NR_renameat2)
8387     case TARGET_NR_renameat2:
8388         {
8389             void *p2;
8390             p  = lock_user_string(arg2);
8391             p2 = lock_user_string(arg4);
8392             if (!p || !p2) {
8393                 ret = -TARGET_EFAULT;
8394             } else {
8395                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8396             }
8397             unlock_user(p2, arg4, 0);
8398             unlock_user(p, arg2, 0);
8399         }
8400         return ret;
8401 #endif
8402 #ifdef TARGET_NR_mkdir
8403     case TARGET_NR_mkdir:
8404         if (!(p = lock_user_string(arg1)))
8405             return -TARGET_EFAULT;
8406         ret = get_errno(mkdir(p, arg2));
8407         unlock_user(p, arg1, 0);
8408         return ret;
8409 #endif
8410 #if defined(TARGET_NR_mkdirat)
8411     case TARGET_NR_mkdirat:
8412         if (!(p = lock_user_string(arg2)))
8413             return -TARGET_EFAULT;
8414         ret = get_errno(mkdirat(arg1, p, arg3));
8415         unlock_user(p, arg2, 0);
8416         return ret;
8417 #endif
8418 #ifdef TARGET_NR_rmdir
8419     case TARGET_NR_rmdir:
8420         if (!(p = lock_user_string(arg1)))
8421             return -TARGET_EFAULT;
8422         ret = get_errno(rmdir(p));
8423         unlock_user(p, arg1, 0);
8424         return ret;
8425 #endif
8426     case TARGET_NR_dup:
8427         ret = get_errno(dup(arg1));
8428         if (ret >= 0) {
8429             fd_trans_dup(arg1, ret);
8430         }
8431         return ret;
8432 #ifdef TARGET_NR_pipe
8433     case TARGET_NR_pipe:
8434         return do_pipe(cpu_env, arg1, 0, 0);
8435 #endif
8436 #ifdef TARGET_NR_pipe2
8437     case TARGET_NR_pipe2:
8438         return do_pipe(cpu_env, arg1,
8439                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8440 #endif
8441     case TARGET_NR_times:
8442         {
8443             struct target_tms *tmsp;
8444             struct tms tms;
8445             ret = get_errno(times(&tms));
8446             if (arg1) {
8447                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8448                 if (!tmsp)
8449                     return -TARGET_EFAULT;
8450                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8451                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8452                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8453                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8454             }
8455             if (!is_error(ret))
8456                 ret = host_to_target_clock_t(ret);
8457         }
8458         return ret;
8459     case TARGET_NR_acct:
8460         if (arg1 == 0) {
8461             ret = get_errno(acct(NULL));
8462         } else {
8463             if (!(p = lock_user_string(arg1))) {
8464                 return -TARGET_EFAULT;
8465             }
8466             ret = get_errno(acct(path(p)));
8467             unlock_user(p, arg1, 0);
8468         }
8469         return ret;
8470 #ifdef TARGET_NR_umount2
8471     case TARGET_NR_umount2:
8472         if (!(p = lock_user_string(arg1)))
8473             return -TARGET_EFAULT;
8474         ret = get_errno(umount2(p, arg2));
8475         unlock_user(p, arg1, 0);
8476         return ret;
8477 #endif
8478     case TARGET_NR_ioctl:
8479         return do_ioctl(arg1, arg2, arg3);
8480 #ifdef TARGET_NR_fcntl
8481     case TARGET_NR_fcntl:
8482         return do_fcntl(arg1, arg2, arg3);
8483 #endif
8484     case TARGET_NR_setpgid:
8485         return get_errno(setpgid(arg1, arg2));
8486     case TARGET_NR_umask:
8487         return get_errno(umask(arg1));
8488     case TARGET_NR_chroot:
8489         if (!(p = lock_user_string(arg1)))
8490             return -TARGET_EFAULT;
8491         ret = get_errno(chroot(p));
8492         unlock_user(p, arg1, 0);
8493         return ret;
8494 #ifdef TARGET_NR_dup2
8495     case TARGET_NR_dup2:
8496         ret = get_errno(dup2(arg1, arg2));
8497         if (ret >= 0) {
8498             fd_trans_dup(arg1, arg2);
8499         }
8500         return ret;
8501 #endif
8502 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8503     case TARGET_NR_dup3:
8504     {
8505         int host_flags;
8506 
8507         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8508             return -TARGET_EINVAL;
8509         }
8510         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8511         ret = get_errno(dup3(arg1, arg2, host_flags));
8512         if (ret >= 0) {
8513             fd_trans_dup(arg1, arg2);
8514         }
8515         return ret;
8516     }
8517 #endif
8518 #ifdef TARGET_NR_getppid /* not on alpha */
8519     case TARGET_NR_getppid:
8520         return get_errno(getppid());
8521 #endif
8522 #ifdef TARGET_NR_getpgrp
8523     case TARGET_NR_getpgrp:
8524         return get_errno(getpgrp());
8525 #endif
8526     case TARGET_NR_setsid:
8527         return get_errno(setsid());
8528 #ifdef TARGET_NR_sigaction
8529     case TARGET_NR_sigaction:
8530         {
8531 #if defined(TARGET_ALPHA)
8532             struct target_sigaction act, oact, *pact = 0;
8533             struct target_old_sigaction *old_act;
8534             if (arg2) {
8535                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8536                     return -TARGET_EFAULT;
8537                 act._sa_handler = old_act->_sa_handler;
8538                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8539                 act.sa_flags = old_act->sa_flags;
8540                 act.sa_restorer = 0;
8541                 unlock_user_struct(old_act, arg2, 0);
8542                 pact = &act;
8543             }
8544             ret = get_errno(do_sigaction(arg1, pact, &oact));
8545             if (!is_error(ret) && arg3) {
8546                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8547                     return -TARGET_EFAULT;
8548                 old_act->_sa_handler = oact._sa_handler;
8549                 old_act->sa_mask = oact.sa_mask.sig[0];
8550                 old_act->sa_flags = oact.sa_flags;
8551                 unlock_user_struct(old_act, arg3, 1);
8552             }
8553 #elif defined(TARGET_MIPS)
8554             struct target_sigaction act, oact, *pact, *old_act;
8555 
8556             if (arg2) {
8557                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8558                     return -TARGET_EFAULT;
8559                 act._sa_handler = old_act->_sa_handler;
8560                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8561                 act.sa_flags = old_act->sa_flags;
8562                 unlock_user_struct(old_act, arg2, 0);
8563                 pact = &act;
8564             } else {
8565                 pact = NULL;
8566             }
8567 
8568             ret = get_errno(do_sigaction(arg1, pact, &oact));
8569 
8570             if (!is_error(ret) && arg3) {
8571                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8572                     return -TARGET_EFAULT;
8573                 old_act->_sa_handler = oact._sa_handler;
8574                 old_act->sa_flags = oact.sa_flags;
8575                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8576                 old_act->sa_mask.sig[1] = 0;
8577                 old_act->sa_mask.sig[2] = 0;
8578                 old_act->sa_mask.sig[3] = 0;
8579                 unlock_user_struct(old_act, arg3, 1);
8580             }
8581 #else
8582             struct target_old_sigaction *old_act;
8583             struct target_sigaction act, oact, *pact;
8584             if (arg2) {
8585                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8586                     return -TARGET_EFAULT;
8587                 act._sa_handler = old_act->_sa_handler;
8588                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8589                 act.sa_flags = old_act->sa_flags;
8590                 act.sa_restorer = old_act->sa_restorer;
8591 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8592                 act.ka_restorer = 0;
8593 #endif
8594                 unlock_user_struct(old_act, arg2, 0);
8595                 pact = &act;
8596             } else {
8597                 pact = NULL;
8598             }
8599             ret = get_errno(do_sigaction(arg1, pact, &oact));
8600             if (!is_error(ret) && arg3) {
8601                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8602                     return -TARGET_EFAULT;
8603                 old_act->_sa_handler = oact._sa_handler;
8604                 old_act->sa_mask = oact.sa_mask.sig[0];
8605                 old_act->sa_flags = oact.sa_flags;
8606                 old_act->sa_restorer = oact.sa_restorer;
8607                 unlock_user_struct(old_act, arg3, 1);
8608             }
8609 #endif
8610         }
8611         return ret;
8612 #endif
8613     case TARGET_NR_rt_sigaction:
8614         {
8615 #if defined(TARGET_ALPHA)
8616             /* For Alpha and SPARC this is a 5 argument syscall, with
8617              * a 'restorer' parameter which must be copied into the
8618              * sa_restorer field of the sigaction struct.
8619              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8620              * and arg5 is the sigsetsize.
8621              * Alpha also has a separate rt_sigaction struct that it uses
8622              * here; SPARC uses the usual sigaction struct.
8623              */
8624             struct target_rt_sigaction *rt_act;
8625             struct target_sigaction act, oact, *pact = 0;
8626 
8627             if (arg4 != sizeof(target_sigset_t)) {
8628                 return -TARGET_EINVAL;
8629             }
8630             if (arg2) {
8631                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8632                     return -TARGET_EFAULT;
8633                 act._sa_handler = rt_act->_sa_handler;
8634                 act.sa_mask = rt_act->sa_mask;
8635                 act.sa_flags = rt_act->sa_flags;
8636                 act.sa_restorer = arg5;
8637                 unlock_user_struct(rt_act, arg2, 0);
8638                 pact = &act;
8639             }
8640             ret = get_errno(do_sigaction(arg1, pact, &oact));
8641             if (!is_error(ret) && arg3) {
8642                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8643                     return -TARGET_EFAULT;
8644                 rt_act->_sa_handler = oact._sa_handler;
8645                 rt_act->sa_mask = oact.sa_mask;
8646                 rt_act->sa_flags = oact.sa_flags;
8647                 unlock_user_struct(rt_act, arg3, 1);
8648             }
8649 #else
8650 #ifdef TARGET_SPARC
8651             target_ulong restorer = arg4;
8652             target_ulong sigsetsize = arg5;
8653 #else
8654             target_ulong sigsetsize = arg4;
8655 #endif
8656             struct target_sigaction *act;
8657             struct target_sigaction *oact;
8658 
8659             if (sigsetsize != sizeof(target_sigset_t)) {
8660                 return -TARGET_EINVAL;
8661             }
8662             if (arg2) {
8663                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8664                     return -TARGET_EFAULT;
8665                 }
8666 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8667                 act->ka_restorer = restorer;
8668 #endif
8669             } else {
8670                 act = NULL;
8671             }
8672             if (arg3) {
8673                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8674                     ret = -TARGET_EFAULT;
8675                     goto rt_sigaction_fail;
8676                 }
8677             } else
8678                 oact = NULL;
8679             ret = get_errno(do_sigaction(arg1, act, oact));
8680         rt_sigaction_fail:
8681             if (act)
8682                 unlock_user_struct(act, arg2, 0);
8683             if (oact)
8684                 unlock_user_struct(oact, arg3, 1);
8685 #endif
8686         }
8687         return ret;
8688 #ifdef TARGET_NR_sgetmask /* not on alpha */
8689     case TARGET_NR_sgetmask:
8690         {
8691             sigset_t cur_set;
8692             abi_ulong target_set;
8693             ret = do_sigprocmask(0, NULL, &cur_set);
8694             if (!ret) {
8695                 host_to_target_old_sigset(&target_set, &cur_set);
8696                 ret = target_set;
8697             }
8698         }
8699         return ret;
8700 #endif
8701 #ifdef TARGET_NR_ssetmask /* not on alpha */
8702     case TARGET_NR_ssetmask:
8703         {
8704             sigset_t set, oset;
8705             abi_ulong target_set = arg1;
8706             target_to_host_old_sigset(&set, &target_set);
8707             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8708             if (!ret) {
8709                 host_to_target_old_sigset(&target_set, &oset);
8710                 ret = target_set;
8711             }
8712         }
8713         return ret;
8714 #endif
8715 #ifdef TARGET_NR_sigprocmask
8716     case TARGET_NR_sigprocmask:
8717         {
8718 #if defined(TARGET_ALPHA)
8719             sigset_t set, oldset;
8720             abi_ulong mask;
8721             int how;
8722 
8723             switch (arg1) {
8724             case TARGET_SIG_BLOCK:
8725                 how = SIG_BLOCK;
8726                 break;
8727             case TARGET_SIG_UNBLOCK:
8728                 how = SIG_UNBLOCK;
8729                 break;
8730             case TARGET_SIG_SETMASK:
8731                 how = SIG_SETMASK;
8732                 break;
8733             default:
8734                 return -TARGET_EINVAL;
8735             }
8736             mask = arg2;
8737             target_to_host_old_sigset(&set, &mask);
8738 
8739             ret = do_sigprocmask(how, &set, &oldset);
8740             if (!is_error(ret)) {
8741                 host_to_target_old_sigset(&mask, &oldset);
8742                 ret = mask;
8743                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8744             }
8745 #else
8746             sigset_t set, oldset, *set_ptr;
8747             int how;
8748 
8749             if (arg2) {
8750                 switch (arg1) {
8751                 case TARGET_SIG_BLOCK:
8752                     how = SIG_BLOCK;
8753                     break;
8754                 case TARGET_SIG_UNBLOCK:
8755                     how = SIG_UNBLOCK;
8756                     break;
8757                 case TARGET_SIG_SETMASK:
8758                     how = SIG_SETMASK;
8759                     break;
8760                 default:
8761                     return -TARGET_EINVAL;
8762                 }
8763                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8764                     return -TARGET_EFAULT;
8765                 target_to_host_old_sigset(&set, p);
8766                 unlock_user(p, arg2, 0);
8767                 set_ptr = &set;
8768             } else {
8769                 how = 0;
8770                 set_ptr = NULL;
8771             }
8772             ret = do_sigprocmask(how, set_ptr, &oldset);
8773             if (!is_error(ret) && arg3) {
8774                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8775                     return -TARGET_EFAULT;
8776                 host_to_target_old_sigset(p, &oldset);
8777                 unlock_user(p, arg3, sizeof(target_sigset_t));
8778             }
8779 #endif
8780         }
8781         return ret;
8782 #endif
8783     case TARGET_NR_rt_sigprocmask:
8784         {
8785             int how = arg1;
8786             sigset_t set, oldset, *set_ptr;
8787 
8788             if (arg4 != sizeof(target_sigset_t)) {
8789                 return -TARGET_EINVAL;
8790             }
8791 
8792             if (arg2) {
8793                 switch(how) {
8794                 case TARGET_SIG_BLOCK:
8795                     how = SIG_BLOCK;
8796                     break;
8797                 case TARGET_SIG_UNBLOCK:
8798                     how = SIG_UNBLOCK;
8799                     break;
8800                 case TARGET_SIG_SETMASK:
8801                     how = SIG_SETMASK;
8802                     break;
8803                 default:
8804                     return -TARGET_EINVAL;
8805                 }
8806                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8807                     return -TARGET_EFAULT;
8808                 target_to_host_sigset(&set, p);
8809                 unlock_user(p, arg2, 0);
8810                 set_ptr = &set;
8811             } else {
8812                 how = 0;
8813                 set_ptr = NULL;
8814             }
8815             ret = do_sigprocmask(how, set_ptr, &oldset);
8816             if (!is_error(ret) && arg3) {
8817                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8818                     return -TARGET_EFAULT;
8819                 host_to_target_sigset(p, &oldset);
8820                 unlock_user(p, arg3, sizeof(target_sigset_t));
8821             }
8822         }
8823         return ret;
8824 #ifdef TARGET_NR_sigpending
8825     case TARGET_NR_sigpending:
8826         {
8827             sigset_t set;
8828             ret = get_errno(sigpending(&set));
8829             if (!is_error(ret)) {
8830                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8831                     return -TARGET_EFAULT;
8832                 host_to_target_old_sigset(p, &set);
8833                 unlock_user(p, arg1, sizeof(target_sigset_t));
8834             }
8835         }
8836         return ret;
8837 #endif
8838     case TARGET_NR_rt_sigpending:
8839         {
8840             sigset_t set;
8841 
8842             /* Yes, this check is >, not != like most. We follow the kernel's
8843              * logic and it does it like this because it implements
8844              * NR_sigpending through the same code path, and in that case
8845              * the old_sigset_t is smaller in size.
8846              */
8847             if (arg2 > sizeof(target_sigset_t)) {
8848                 return -TARGET_EINVAL;
8849             }
8850 
8851             ret = get_errno(sigpending(&set));
8852             if (!is_error(ret)) {
8853                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8854                     return -TARGET_EFAULT;
8855                 host_to_target_sigset(p, &set);
8856                 unlock_user(p, arg1, sizeof(target_sigset_t));
8857             }
8858         }
8859         return ret;
8860 #ifdef TARGET_NR_sigsuspend
8861     case TARGET_NR_sigsuspend:
8862         {
8863             TaskState *ts = cpu->opaque;
8864 #if defined(TARGET_ALPHA)
8865             abi_ulong mask = arg1;
8866             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8867 #else
8868             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8869                 return -TARGET_EFAULT;
8870             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8871             unlock_user(p, arg1, 0);
8872 #endif
8873             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8874                                                SIGSET_T_SIZE));
8875             if (ret != -TARGET_ERESTARTSYS) {
8876                 ts->in_sigsuspend = 1;
8877             }
8878         }
8879         return ret;
8880 #endif
8881     case TARGET_NR_rt_sigsuspend:
8882         {
8883             TaskState *ts = cpu->opaque;
8884 
8885             if (arg2 != sizeof(target_sigset_t)) {
8886                 return -TARGET_EINVAL;
8887             }
8888             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8889                 return -TARGET_EFAULT;
8890             target_to_host_sigset(&ts->sigsuspend_mask, p);
8891             unlock_user(p, arg1, 0);
8892             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8893                                                SIGSET_T_SIZE));
8894             if (ret != -TARGET_ERESTARTSYS) {
8895                 ts->in_sigsuspend = 1;
8896             }
8897         }
8898         return ret;
8899 #ifdef TARGET_NR_rt_sigtimedwait
8900     case TARGET_NR_rt_sigtimedwait:
8901         {
8902             sigset_t set;
8903             struct timespec uts, *puts;
8904             siginfo_t uinfo;
8905 
8906             if (arg4 != sizeof(target_sigset_t)) {
8907                 return -TARGET_EINVAL;
8908             }
8909 
8910             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8911                 return -TARGET_EFAULT;
8912             target_to_host_sigset(&set, p);
8913             unlock_user(p, arg1, 0);
8914             if (arg3) {
8915                 puts = &uts;
8916                 if (target_to_host_timespec(puts, arg3)) {
8917                     return -TARGET_EFAULT;
8918                 }
8919             } else {
8920                 puts = NULL;
8921             }
8922             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8923                                                  SIGSET_T_SIZE));
8924             if (!is_error(ret)) {
8925                 if (arg2) {
8926                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8927                                   0);
8928                     if (!p) {
8929                         return -TARGET_EFAULT;
8930                     }
8931                     host_to_target_siginfo(p, &uinfo);
8932                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8933                 }
8934                 ret = host_to_target_signal(ret);
8935             }
8936         }
8937         return ret;
8938 #endif
8939     case TARGET_NR_rt_sigqueueinfo:
8940         {
8941             siginfo_t uinfo;
8942 
8943             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8944             if (!p) {
8945                 return -TARGET_EFAULT;
8946             }
8947             target_to_host_siginfo(&uinfo, p);
8948             unlock_user(p, arg3, 0);
8949             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8950         }
8951         return ret;
8952     case TARGET_NR_rt_tgsigqueueinfo:
8953         {
8954             siginfo_t uinfo;
8955 
8956             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8957             if (!p) {
8958                 return -TARGET_EFAULT;
8959             }
8960             target_to_host_siginfo(&uinfo, p);
8961             unlock_user(p, arg4, 0);
8962             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8963         }
8964         return ret;
8965 #ifdef TARGET_NR_sigreturn
8966     case TARGET_NR_sigreturn:
8967         if (block_signals()) {
8968             return -TARGET_ERESTARTSYS;
8969         }
8970         return do_sigreturn(cpu_env);
8971 #endif
8972     case TARGET_NR_rt_sigreturn:
8973         if (block_signals()) {
8974             return -TARGET_ERESTARTSYS;
8975         }
8976         return do_rt_sigreturn(cpu_env);
8977     case TARGET_NR_sethostname:
8978         if (!(p = lock_user_string(arg1)))
8979             return -TARGET_EFAULT;
8980         ret = get_errno(sethostname(p, arg2));
8981         unlock_user(p, arg1, 0);
8982         return ret;
8983 #ifdef TARGET_NR_setrlimit
8984     case TARGET_NR_setrlimit:
8985         {
8986             int resource = target_to_host_resource(arg1);
8987             struct target_rlimit *target_rlim;
8988             struct rlimit rlim;
8989             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8990                 return -TARGET_EFAULT;
8991             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8992             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8993             unlock_user_struct(target_rlim, arg2, 0);
8994             /*
8995              * If we just passed through resource limit settings for memory then
8996              * they would also apply to QEMU's own allocations, and QEMU will
8997              * crash or hang or die if its allocations fail. Ideally we would
8998              * track the guest allocations in QEMU and apply the limits ourselves.
8999              * For now, just tell the guest the call succeeded but don't actually
9000              * limit anything.
9001              */
9002             if (resource != RLIMIT_AS &&
9003                 resource != RLIMIT_DATA &&
9004                 resource != RLIMIT_STACK) {
9005                 return get_errno(setrlimit(resource, &rlim));
9006             } else {
9007                 return 0;
9008             }
9009         }
9010 #endif
9011 #ifdef TARGET_NR_getrlimit
9012     case TARGET_NR_getrlimit:
9013         {
9014             int resource = target_to_host_resource(arg1);
9015             struct target_rlimit *target_rlim;
9016             struct rlimit rlim;
9017 
9018             ret = get_errno(getrlimit(resource, &rlim));
9019             if (!is_error(ret)) {
9020                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9021                     return -TARGET_EFAULT;
9022                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9023                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9024                 unlock_user_struct(target_rlim, arg2, 1);
9025             }
9026         }
9027         return ret;
9028 #endif
9029     case TARGET_NR_getrusage:
9030         {
9031             struct rusage rusage;
9032             ret = get_errno(getrusage(arg1, &rusage));
9033             if (!is_error(ret)) {
9034                 ret = host_to_target_rusage(arg2, &rusage);
9035             }
9036         }
9037         return ret;
9038 #if defined(TARGET_NR_gettimeofday)
9039     case TARGET_NR_gettimeofday:
9040         {
9041             struct timeval tv;
9042             struct timezone tz;
9043 
9044             ret = get_errno(gettimeofday(&tv, &tz));
9045             if (!is_error(ret)) {
9046                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9047                     return -TARGET_EFAULT;
9048                 }
9049                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9050                     return -TARGET_EFAULT;
9051                 }
9052             }
9053         }
9054         return ret;
9055 #endif
9056 #if defined(TARGET_NR_settimeofday)
9057     case TARGET_NR_settimeofday:
9058         {
9059             struct timeval tv, *ptv = NULL;
9060             struct timezone tz, *ptz = NULL;
9061 
9062             if (arg1) {
9063                 if (copy_from_user_timeval(&tv, arg1)) {
9064                     return -TARGET_EFAULT;
9065                 }
9066                 ptv = &tv;
9067             }
9068 
9069             if (arg2) {
9070                 if (copy_from_user_timezone(&tz, arg2)) {
9071                     return -TARGET_EFAULT;
9072                 }
9073                 ptz = &tz;
9074             }
9075 
9076             return get_errno(settimeofday(ptv, ptz));
9077         }
9078 #endif
9079 #if defined(TARGET_NR_select)
9080     case TARGET_NR_select:
9081 #if defined(TARGET_WANT_NI_OLD_SELECT)
9082         /* some architectures used to have old_select here
9083          * but now return ENOSYS for it.
9084          */
9085         ret = -TARGET_ENOSYS;
9086 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9087         ret = do_old_select(arg1);
9088 #else
9089         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9090 #endif
9091         return ret;
9092 #endif
9093 #ifdef TARGET_NR_pselect6
9094     case TARGET_NR_pselect6:
9095         {
9096             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9097             fd_set rfds, wfds, efds;
9098             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9099             struct timespec ts, *ts_ptr;
9100 
9101             /*
9102              * The 6th arg is actually two args smashed together,
9103              * so we cannot use the C library.
9104              */
9105             sigset_t set;
9106             struct {
9107                 sigset_t *set;
9108                 size_t size;
9109             } sig, *sig_ptr;
9110 
9111             abi_ulong arg_sigset, arg_sigsize, *arg7;
9112             target_sigset_t *target_sigset;
9113 
9114             n = arg1;
9115             rfd_addr = arg2;
9116             wfd_addr = arg3;
9117             efd_addr = arg4;
9118             ts_addr = arg5;
9119 
9120             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9121             if (ret) {
9122                 return ret;
9123             }
9124             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9125             if (ret) {
9126                 return ret;
9127             }
9128             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9129             if (ret) {
9130                 return ret;
9131             }
9132 
9133             /*
9134              * This takes a timespec, and not a timeval, so we cannot
9135              * use the do_select() helper ...
9136              */
9137             if (ts_addr) {
9138                 if (target_to_host_timespec(&ts, ts_addr)) {
9139                     return -TARGET_EFAULT;
9140                 }
9141                 ts_ptr = &ts;
9142             } else {
9143                 ts_ptr = NULL;
9144             }
9145 
9146             /* Extract the two packed args for the sigset */
9147             if (arg6) {
9148                 sig_ptr = &sig;
9149                 sig.size = SIGSET_T_SIZE;
9150 
9151                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9152                 if (!arg7) {
9153                     return -TARGET_EFAULT;
9154                 }
9155                 arg_sigset = tswapal(arg7[0]);
9156                 arg_sigsize = tswapal(arg7[1]);
9157                 unlock_user(arg7, arg6, 0);
9158 
9159                 if (arg_sigset) {
9160                     sig.set = &set;
9161                     if (arg_sigsize != sizeof(*target_sigset)) {
9162                         /* Like the kernel, we enforce correct size sigsets */
9163                         return -TARGET_EINVAL;
9164                     }
9165                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9166                                               sizeof(*target_sigset), 1);
9167                     if (!target_sigset) {
9168                         return -TARGET_EFAULT;
9169                     }
9170                     target_to_host_sigset(&set, target_sigset);
9171                     unlock_user(target_sigset, arg_sigset, 0);
9172                 } else {
9173                     sig.set = NULL;
9174                 }
9175             } else {
9176                 sig_ptr = NULL;
9177             }
9178 
9179             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9180                                           ts_ptr, sig_ptr));
9181 
9182             if (!is_error(ret)) {
9183                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9184                     return -TARGET_EFAULT;
9185                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9186                     return -TARGET_EFAULT;
9187                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9188                     return -TARGET_EFAULT;
9189 
9190                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9191                     return -TARGET_EFAULT;
9192             }
9193         }
9194         return ret;
9195 #endif
9196 #ifdef TARGET_NR_symlink
9197     case TARGET_NR_symlink:
9198         {
9199             void *p2;
9200             p = lock_user_string(arg1);
9201             p2 = lock_user_string(arg2);
9202             if (!p || !p2)
9203                 ret = -TARGET_EFAULT;
9204             else
9205                 ret = get_errno(symlink(p, p2));
9206             unlock_user(p2, arg2, 0);
9207             unlock_user(p, arg1, 0);
9208         }
9209         return ret;
9210 #endif
9211 #if defined(TARGET_NR_symlinkat)
9212     case TARGET_NR_symlinkat:
9213         {
9214             void *p2;
9215             p  = lock_user_string(arg1);
9216             p2 = lock_user_string(arg3);
9217             if (!p || !p2)
9218                 ret = -TARGET_EFAULT;
9219             else
9220                 ret = get_errno(symlinkat(p, arg2, p2));
9221             unlock_user(p2, arg3, 0);
9222             unlock_user(p, arg1, 0);
9223         }
9224         return ret;
9225 #endif
9226 #ifdef TARGET_NR_readlink
9227     case TARGET_NR_readlink:
9228         {
9229             void *p2;
9230             p = lock_user_string(arg1);
9231             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9232             if (!p || !p2) {
9233                 ret = -TARGET_EFAULT;
9234             } else if (!arg3) {
9235                 /* Short circuit this for the magic exe check. */
9236                 ret = -TARGET_EINVAL;
9237             } else if (is_proc_myself((const char *)p, "exe")) {
9238                 char real[PATH_MAX], *temp;
9239                 temp = realpath(exec_path, real);
9240                 /* Return value is # of bytes that we wrote to the buffer. */
9241                 if (temp == NULL) {
9242                     ret = get_errno(-1);
9243                 } else {
9244                     /* Don't worry about sign mismatch as earlier mapping
9245                      * logic would have thrown a bad address error. */
9246                     ret = MIN(strlen(real), arg3);
9247                     /* We cannot NUL terminate the string. */
9248                     memcpy(p2, real, ret);
9249                 }
9250             } else {
9251                 ret = get_errno(readlink(path(p), p2, arg3));
9252             }
9253             unlock_user(p2, arg2, ret);
9254             unlock_user(p, arg1, 0);
9255         }
9256         return ret;
9257 #endif
9258 #if defined(TARGET_NR_readlinkat)
9259     case TARGET_NR_readlinkat:
9260         {
9261             void *p2;
9262             p  = lock_user_string(arg2);
9263             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9264             if (!p || !p2) {
9265                 ret = -TARGET_EFAULT;
9266             } else if (is_proc_myself((const char *)p, "exe")) {
9267                 char real[PATH_MAX], *temp;
9268                 temp = realpath(exec_path, real);
9269                 ret = temp == NULL ? get_errno(-1) : MIN(strlen(real), arg4);
9270                 snprintf((char *)p2, arg4, "%s", temp ? real : "");
9271             } else {
9272                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9273             }
9274             unlock_user(p2, arg3, ret);
9275             unlock_user(p, arg2, 0);
9276         }
9277         return ret;
9278 #endif
9279 #ifdef TARGET_NR_swapon
9280     case TARGET_NR_swapon:
9281         if (!(p = lock_user_string(arg1)))
9282             return -TARGET_EFAULT;
9283         ret = get_errno(swapon(p, arg2));
9284         unlock_user(p, arg1, 0);
9285         return ret;
9286 #endif
9287     case TARGET_NR_reboot:
9288         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9289             /* arg4 is only used here; it must be ignored in all other cases */
9290             p = lock_user_string(arg4);
9291             if (!p) {
9292                 return -TARGET_EFAULT;
9293             }
9294             ret = get_errno(reboot(arg1, arg2, arg3, p));
9295             unlock_user(p, arg4, 0);
9296         } else {
9297             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9298         }
9299         return ret;
9300 #ifdef TARGET_NR_mmap
9301     case TARGET_NR_mmap:
9302 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9303     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9304     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9305     || defined(TARGET_S390X)
9306         {
9307             abi_ulong *v;
9308             abi_ulong v1, v2, v3, v4, v5, v6;
9309             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9310                 return -TARGET_EFAULT;
9311             v1 = tswapal(v[0]);
9312             v2 = tswapal(v[1]);
9313             v3 = tswapal(v[2]);
9314             v4 = tswapal(v[3]);
9315             v5 = tswapal(v[4]);
9316             v6 = tswapal(v[5]);
9317             unlock_user(v, arg1, 0);
9318             ret = get_errno(target_mmap(v1, v2, v3,
9319                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9320                                         v5, v6));
9321         }
9322 #else
9323         ret = get_errno(target_mmap(arg1, arg2, arg3,
9324                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9325                                     arg5,
9326                                     arg6));
9327 #endif
9328         return ret;
9329 #endif
9330 #ifdef TARGET_NR_mmap2
9331     case TARGET_NR_mmap2:
9332 #ifndef MMAP_SHIFT
9333 #define MMAP_SHIFT 12
9334 #endif
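        /*
         * mmap2 passes the file offset in page-sized units (4096 bytes unless
         * the target overrides MMAP_SHIFT), so shift it up into a byte offset
         * before handing it to target_mmap().
         */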
9335         ret = target_mmap(arg1, arg2, arg3,
9336                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9337                           arg5, arg6 << MMAP_SHIFT);
9338         return get_errno(ret);
9339 #endif
9340     case TARGET_NR_munmap:
9341         return get_errno(target_munmap(arg1, arg2));
9342     case TARGET_NR_mprotect:
9343         {
9344             TaskState *ts = cpu->opaque;
9345             /* Special hack to detect libc making the stack executable.  */
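            /*
             * PROT_GROWSDOWN is emulated by hand here: the flag is stripped
             * and the range is extended downwards to the recorded guest
             * stack limit before the call to target_mprotect() below.
             */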
9346             if ((arg3 & PROT_GROWSDOWN)
9347                 && arg1 >= ts->info->stack_limit
9348                 && arg1 <= ts->info->start_stack) {
9349                 arg3 &= ~PROT_GROWSDOWN;
9350                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9351                 arg1 = ts->info->stack_limit;
9352             }
9353         }
9354         return get_errno(target_mprotect(arg1, arg2, arg3));
9355 #ifdef TARGET_NR_mremap
9356     case TARGET_NR_mremap:
9357         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9358 #endif
9359         /* ??? msync/mlock/munlock are broken for softmmu.  */
9360 #ifdef TARGET_NR_msync
9361     case TARGET_NR_msync:
9362         return get_errno(msync(g2h(arg1), arg2, arg3));
9363 #endif
9364 #ifdef TARGET_NR_mlock
9365     case TARGET_NR_mlock:
9366         return get_errno(mlock(g2h(arg1), arg2));
9367 #endif
9368 #ifdef TARGET_NR_munlock
9369     case TARGET_NR_munlock:
9370         return get_errno(munlock(g2h(arg1), arg2));
9371 #endif
9372 #ifdef TARGET_NR_mlockall
9373     case TARGET_NR_mlockall:
9374         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9375 #endif
9376 #ifdef TARGET_NR_munlockall
9377     case TARGET_NR_munlockall:
9378         return get_errno(munlockall());
9379 #endif
9380 #ifdef TARGET_NR_truncate
9381     case TARGET_NR_truncate:
9382         if (!(p = lock_user_string(arg1)))
9383             return -TARGET_EFAULT;
9384         ret = get_errno(truncate(p, arg2));
9385         unlock_user(p, arg1, 0);
9386         return ret;
9387 #endif
9388 #ifdef TARGET_NR_ftruncate
9389     case TARGET_NR_ftruncate:
9390         return get_errno(ftruncate(arg1, arg2));
9391 #endif
9392     case TARGET_NR_fchmod:
9393         return get_errno(fchmod(arg1, arg2));
9394 #if defined(TARGET_NR_fchmodat)
9395     case TARGET_NR_fchmodat:
9396         if (!(p = lock_user_string(arg2)))
9397             return -TARGET_EFAULT;
9398         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9399         unlock_user(p, arg2, 0);
9400         return ret;
9401 #endif
9402     case TARGET_NR_getpriority:
9403         /* Note that negative values are valid for getpriority, so we must
9404            differentiate based on errno settings.  */
9405         errno = 0;
9406         ret = getpriority(arg1, arg2);
9407         if (ret == -1 && errno != 0) {
9408             return -host_to_target_errno(errno);
9409         }
9410 #ifdef TARGET_ALPHA
9411         /* Return value is the unbiased priority.  Signal no error.  */
9412         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9413 #else
9414         /* Return value is a biased priority to avoid negative numbers.  */
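        /* For example, a host nice value of -20 is returned as 40 and +19 as 1. */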
9415         ret = 20 - ret;
9416 #endif
9417         return ret;
9418     case TARGET_NR_setpriority:
9419         return get_errno(setpriority(arg1, arg2, arg3));
9420 #ifdef TARGET_NR_statfs
9421     case TARGET_NR_statfs:
9422         if (!(p = lock_user_string(arg1))) {
9423             return -TARGET_EFAULT;
9424         }
9425         ret = get_errno(statfs(path(p), &stfs));
9426         unlock_user(p, arg1, 0);
9427     convert_statfs:
9428         if (!is_error(ret)) {
9429             struct target_statfs *target_stfs;
9430 
9431             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9432                 return -TARGET_EFAULT;
9433             __put_user(stfs.f_type, &target_stfs->f_type);
9434             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9435             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9436             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9437             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9438             __put_user(stfs.f_files, &target_stfs->f_files);
9439             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9440             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9441             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9442             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9443             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9444 #ifdef _STATFS_F_FLAGS
9445             __put_user(stfs.f_flags, &target_stfs->f_flags);
9446 #else
9447             __put_user(0, &target_stfs->f_flags);
9448 #endif
9449             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9450             unlock_user_struct(target_stfs, arg2, 1);
9451         }
9452         return ret;
9453 #endif
9454 #ifdef TARGET_NR_fstatfs
9455     case TARGET_NR_fstatfs:
9456         ret = get_errno(fstatfs(arg1, &stfs));
9457         goto convert_statfs;
9458 #endif
9459 #ifdef TARGET_NR_statfs64
9460     case TARGET_NR_statfs64:
9461         if (!(p = lock_user_string(arg1))) {
9462             return -TARGET_EFAULT;
9463         }
9464         ret = get_errno(statfs(path(p), &stfs));
9465         unlock_user(p, arg1, 0);
9466     convert_statfs64:
9467         if (!is_error(ret)) {
9468             struct target_statfs64 *target_stfs;
9469 
9470             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9471                 return -TARGET_EFAULT;
9472             __put_user(stfs.f_type, &target_stfs->f_type);
9473             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9474             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9475             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9476             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9477             __put_user(stfs.f_files, &target_stfs->f_files);
9478             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9479             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9480             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9481             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9482             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9483             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9484             unlock_user_struct(target_stfs, arg3, 1);
9485         }
9486         return ret;
9487     case TARGET_NR_fstatfs64:
9488         ret = get_errno(fstatfs(arg1, &stfs));
9489         goto convert_statfs64;
9490 #endif
9491 #ifdef TARGET_NR_socketcall
9492     case TARGET_NR_socketcall:
9493         return do_socketcall(arg1, arg2);
9494 #endif
9495 #ifdef TARGET_NR_accept
9496     case TARGET_NR_accept:
9497         return do_accept4(arg1, arg2, arg3, 0);
9498 #endif
9499 #ifdef TARGET_NR_accept4
9500     case TARGET_NR_accept4:
9501         return do_accept4(arg1, arg2, arg3, arg4);
9502 #endif
9503 #ifdef TARGET_NR_bind
9504     case TARGET_NR_bind:
9505         return do_bind(arg1, arg2, arg3);
9506 #endif
9507 #ifdef TARGET_NR_connect
9508     case TARGET_NR_connect:
9509         return do_connect(arg1, arg2, arg3);
9510 #endif
9511 #ifdef TARGET_NR_getpeername
9512     case TARGET_NR_getpeername:
9513         return do_getpeername(arg1, arg2, arg3);
9514 #endif
9515 #ifdef TARGET_NR_getsockname
9516     case TARGET_NR_getsockname:
9517         return do_getsockname(arg1, arg2, arg3);
9518 #endif
9519 #ifdef TARGET_NR_getsockopt
9520     case TARGET_NR_getsockopt:
9521         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9522 #endif
9523 #ifdef TARGET_NR_listen
9524     case TARGET_NR_listen:
9525         return get_errno(listen(arg1, arg2));
9526 #endif
9527 #ifdef TARGET_NR_recv
9528     case TARGET_NR_recv:
9529         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9530 #endif
9531 #ifdef TARGET_NR_recvfrom
9532     case TARGET_NR_recvfrom:
9533         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9534 #endif
9535 #ifdef TARGET_NR_recvmsg
9536     case TARGET_NR_recvmsg:
9537         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9538 #endif
9539 #ifdef TARGET_NR_send
9540     case TARGET_NR_send:
9541         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9542 #endif
9543 #ifdef TARGET_NR_sendmsg
9544     case TARGET_NR_sendmsg:
9545         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9546 #endif
9547 #ifdef TARGET_NR_sendmmsg
9548     case TARGET_NR_sendmmsg:
9549         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9550 #endif
9551 #ifdef TARGET_NR_recvmmsg
9552     case TARGET_NR_recvmmsg:
9553         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9554 #endif
9555 #ifdef TARGET_NR_sendto
9556     case TARGET_NR_sendto:
9557         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9558 #endif
9559 #ifdef TARGET_NR_shutdown
9560     case TARGET_NR_shutdown:
9561         return get_errno(shutdown(arg1, arg2));
9562 #endif
9563 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9564     case TARGET_NR_getrandom:
9565         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9566         if (!p) {
9567             return -TARGET_EFAULT;
9568         }
9569         ret = get_errno(getrandom(p, arg2, arg3));
9570         unlock_user(p, arg1, ret);
9571         return ret;
9572 #endif
9573 #ifdef TARGET_NR_socket
9574     case TARGET_NR_socket:
9575         return do_socket(arg1, arg2, arg3);
9576 #endif
9577 #ifdef TARGET_NR_socketpair
9578     case TARGET_NR_socketpair:
9579         return do_socketpair(arg1, arg2, arg3, arg4);
9580 #endif
9581 #ifdef TARGET_NR_setsockopt
9582     case TARGET_NR_setsockopt:
9583         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9584 #endif
9585 #if defined(TARGET_NR_syslog)
9586     case TARGET_NR_syslog:
9587         {
9588             int len = arg3;
9589 
9590             switch (arg1) {
9591             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9592             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9593             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9594             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9595             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9596             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9597             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9598             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9599                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9600             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9601             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9602             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9603                 {
9604                     if (len < 0) {
9605                         return -TARGET_EINVAL;
9606                     }
9607                     if (len == 0) {
9608                         return 0;
9609                     }
9610                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9611                     if (!p) {
9612                         return -TARGET_EFAULT;
9613                     }
9614                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9615                     unlock_user(p, arg2, arg3);
9616                 }
9617                 return ret;
9618             default:
9619                 return -TARGET_EINVAL;
9620             }
9621         }
9622         break;
9623 #endif
9624     case TARGET_NR_setitimer:
9625         {
9626             struct itimerval value, ovalue, *pvalue;
9627 
9628             if (arg2) {
9629                 pvalue = &value;
9630                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9631                     || copy_from_user_timeval(&pvalue->it_value,
9632                                               arg2 + sizeof(struct target_timeval)))
9633                     return -TARGET_EFAULT;
9634             } else {
9635                 pvalue = NULL;
9636             }
9637             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9638             if (!is_error(ret) && arg3) {
9639                 if (copy_to_user_timeval(arg3,
9640                                          &ovalue.it_interval)
9641                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9642                                             &ovalue.it_value))
9643                     return -TARGET_EFAULT;
9644             }
9645         }
9646         return ret;
9647     case TARGET_NR_getitimer:
9648         {
9649             struct itimerval value;
9650 
9651             ret = get_errno(getitimer(arg1, &value));
9652             if (!is_error(ret) && arg2) {
9653                 if (copy_to_user_timeval(arg2,
9654                                          &value.it_interval)
9655                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9656                                             &value.it_value))
9657                     return -TARGET_EFAULT;
9658             }
9659         }
9660         return ret;
9661 #ifdef TARGET_NR_stat
9662     case TARGET_NR_stat:
9663         if (!(p = lock_user_string(arg1))) {
9664             return -TARGET_EFAULT;
9665         }
9666         ret = get_errno(stat(path(p), &st));
9667         unlock_user(p, arg1, 0);
9668         goto do_stat;
9669 #endif
9670 #ifdef TARGET_NR_lstat
9671     case TARGET_NR_lstat:
9672         if (!(p = lock_user_string(arg1))) {
9673             return -TARGET_EFAULT;
9674         }
9675         ret = get_errno(lstat(path(p), &st));
9676         unlock_user(p, arg1, 0);
9677         goto do_stat;
9678 #endif
9679 #ifdef TARGET_NR_fstat
9680     case TARGET_NR_fstat:
9681         {
9682             ret = get_errno(fstat(arg1, &st));
9683 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9684         do_stat:
9685 #endif
9686             if (!is_error(ret)) {
9687                 struct target_stat *target_st;
9688 
9689                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9690                     return -TARGET_EFAULT;
9691                 memset(target_st, 0, sizeof(*target_st));
9692                 __put_user(st.st_dev, &target_st->st_dev);
9693                 __put_user(st.st_ino, &target_st->st_ino);
9694                 __put_user(st.st_mode, &target_st->st_mode);
9695                 __put_user(st.st_uid, &target_st->st_uid);
9696                 __put_user(st.st_gid, &target_st->st_gid);
9697                 __put_user(st.st_nlink, &target_st->st_nlink);
9698                 __put_user(st.st_rdev, &target_st->st_rdev);
9699                 __put_user(st.st_size, &target_st->st_size);
9700                 __put_user(st.st_blksize, &target_st->st_blksize);
9701                 __put_user(st.st_blocks, &target_st->st_blocks);
9702                 __put_user(st.st_atime, &target_st->target_st_atime);
9703                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9704                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9705 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9706     defined(TARGET_STAT_HAVE_NSEC)
9707                 __put_user(st.st_atim.tv_nsec,
9708                            &target_st->target_st_atime_nsec);
9709                 __put_user(st.st_mtim.tv_nsec,
9710                            &target_st->target_st_mtime_nsec);
9711                 __put_user(st.st_ctim.tv_nsec,
9712                            &target_st->target_st_ctime_nsec);
9713 #endif
9714                 unlock_user_struct(target_st, arg2, 1);
9715             }
9716         }
9717         return ret;
9718 #endif
9719     case TARGET_NR_vhangup:
9720         return get_errno(vhangup());
9721 #ifdef TARGET_NR_syscall
9722     case TARGET_NR_syscall:
9723         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9724                           arg6, arg7, arg8, 0);
9725 #endif
9726 #if defined(TARGET_NR_wait4)
9727     case TARGET_NR_wait4:
9728         {
9729             int status;
9730             abi_long status_ptr = arg2;
9731             struct rusage rusage, *rusage_ptr;
9732             abi_ulong target_rusage = arg4;
9733             abi_long rusage_err;
9734             if (target_rusage)
9735                 rusage_ptr = &rusage;
9736             else
9737                 rusage_ptr = NULL;
9738             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9739             if (!is_error(ret)) {
9740                 if (status_ptr && ret) {
9741                     status = host_to_target_waitstatus(status);
9742                     if (put_user_s32(status, status_ptr))
9743                         return -TARGET_EFAULT;
9744                 }
9745                 if (target_rusage) {
9746                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9747                     if (rusage_err) {
9748                         ret = rusage_err;
9749                     }
9750                 }
9751             }
9752         }
9753         return ret;
9754 #endif
9755 #ifdef TARGET_NR_swapoff
9756     case TARGET_NR_swapoff:
9757         if (!(p = lock_user_string(arg1)))
9758             return -TARGET_EFAULT;
9759         ret = get_errno(swapoff(p));
9760         unlock_user(p, arg1, 0);
9761         return ret;
9762 #endif
9763     case TARGET_NR_sysinfo:
9764         {
9765             struct target_sysinfo *target_value;
9766             struct sysinfo value;
9767             ret = get_errno(sysinfo(&value));
9768             if (!is_error(ret) && arg1)
9769             {
9770                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9771                     return -TARGET_EFAULT;
9772                 __put_user(value.uptime, &target_value->uptime);
9773                 __put_user(value.loads[0], &target_value->loads[0]);
9774                 __put_user(value.loads[1], &target_value->loads[1]);
9775                 __put_user(value.loads[2], &target_value->loads[2]);
9776                 __put_user(value.totalram, &target_value->totalram);
9777                 __put_user(value.freeram, &target_value->freeram);
9778                 __put_user(value.sharedram, &target_value->sharedram);
9779                 __put_user(value.bufferram, &target_value->bufferram);
9780                 __put_user(value.totalswap, &target_value->totalswap);
9781                 __put_user(value.freeswap, &target_value->freeswap);
9782                 __put_user(value.procs, &target_value->procs);
9783                 __put_user(value.totalhigh, &target_value->totalhigh);
9784                 __put_user(value.freehigh, &target_value->freehigh);
9785                 __put_user(value.mem_unit, &target_value->mem_unit);
9786                 unlock_user_struct(target_value, arg1, 1);
9787             }
9788         }
9789         return ret;
9790 #ifdef TARGET_NR_ipc
9791     case TARGET_NR_ipc:
9792         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9793 #endif
9794 #ifdef TARGET_NR_semget
9795     case TARGET_NR_semget:
9796         return get_errno(semget(arg1, arg2, arg3));
9797 #endif
9798 #ifdef TARGET_NR_semop
9799     case TARGET_NR_semop:
9800         return do_semtimedop(arg1, arg2, arg3, 0);
9801 #endif
9802 #ifdef TARGET_NR_semtimedop
9803     case TARGET_NR_semtimedop:
9804         return do_semtimedop(arg1, arg2, arg3, arg4);
9805 #endif
9806 #ifdef TARGET_NR_semctl
9807     case TARGET_NR_semctl:
9808         return do_semctl(arg1, arg2, arg3, arg4);
9809 #endif
9810 #ifdef TARGET_NR_msgctl
9811     case TARGET_NR_msgctl:
9812         return do_msgctl(arg1, arg2, arg3);
9813 #endif
9814 #ifdef TARGET_NR_msgget
9815     case TARGET_NR_msgget:
9816         return get_errno(msgget(arg1, arg2));
9817 #endif
9818 #ifdef TARGET_NR_msgrcv
9819     case TARGET_NR_msgrcv:
9820         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9821 #endif
9822 #ifdef TARGET_NR_msgsnd
9823     case TARGET_NR_msgsnd:
9824         return do_msgsnd(arg1, arg2, arg3, arg4);
9825 #endif
9826 #ifdef TARGET_NR_shmget
9827     case TARGET_NR_shmget:
9828         return get_errno(shmget(arg1, arg2, arg3));
9829 #endif
9830 #ifdef TARGET_NR_shmctl
9831     case TARGET_NR_shmctl:
9832         return do_shmctl(arg1, arg2, arg3);
9833 #endif
9834 #ifdef TARGET_NR_shmat
9835     case TARGET_NR_shmat:
9836         return do_shmat(cpu_env, arg1, arg2, arg3);
9837 #endif
9838 #ifdef TARGET_NR_shmdt
9839     case TARGET_NR_shmdt:
9840         return do_shmdt(arg1);
9841 #endif
9842     case TARGET_NR_fsync:
9843         return get_errno(fsync(arg1));
9844     case TARGET_NR_clone:
9845         /* Linux manages to have three different orderings for its
9846          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9847          * match the kernel's CONFIG_CLONE_* settings.
9848          * Microblaze is further special in that it uses a sixth
9849          * implicit argument to clone for the TLS pointer.
9850          */
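        /*
         * do_fork() is assumed here to take its arguments in the order
         * (cpu_env, flags, newsp, parent_tidptr, tls, child_tidptr); each
         * branch below just permutes the guest registers into that order.
         */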
9851 #if defined(TARGET_MICROBLAZE)
9852         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9853 #elif defined(TARGET_CLONE_BACKWARDS)
9854         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9855 #elif defined(TARGET_CLONE_BACKWARDS2)
9856         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9857 #else
9858         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9859 #endif
9860         return ret;
9861 #ifdef __NR_exit_group
9862         /* new thread calls */
9863     case TARGET_NR_exit_group:
9864         preexit_cleanup(cpu_env, arg1);
9865         return get_errno(exit_group(arg1));
9866 #endif
9867     case TARGET_NR_setdomainname:
9868         if (!(p = lock_user_string(arg1)))
9869             return -TARGET_EFAULT;
9870         ret = get_errno(setdomainname(p, arg2));
9871         unlock_user(p, arg1, 0);
9872         return ret;
9873     case TARGET_NR_uname:
9874         /* no need to transcode because we use the linux syscall */
9875         {
9876             struct new_utsname * buf;
9877 
9878             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9879                 return -TARGET_EFAULT;
9880             ret = get_errno(sys_uname(buf));
9881             if (!is_error(ret)) {
9882                 /* Overwrite the native machine name with whatever is being
9883                    emulated. */
9884                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9885                           sizeof(buf->machine));
9886                 /* Allow the user to override the reported release.  */
9887                 if (qemu_uname_release && *qemu_uname_release) {
9888                     g_strlcpy(buf->release, qemu_uname_release,
9889                               sizeof(buf->release));
9890                 }
9891             }
9892             unlock_user_struct(buf, arg1, 1);
9893         }
9894         return ret;
9895 #ifdef TARGET_I386
9896     case TARGET_NR_modify_ldt:
9897         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9898 #if !defined(TARGET_X86_64)
9899     case TARGET_NR_vm86:
9900         return do_vm86(cpu_env, arg1, arg2);
9901 #endif
9902 #endif
9903 #if defined(TARGET_NR_adjtimex)
9904     case TARGET_NR_adjtimex:
9905         {
9906             struct timex host_buf;
9907 
9908             if (target_to_host_timex(&host_buf, arg1) != 0) {
9909                 return -TARGET_EFAULT;
9910             }
9911             ret = get_errno(adjtimex(&host_buf));
9912             if (!is_error(ret)) {
9913                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9914                     return -TARGET_EFAULT;
9915                 }
9916             }
9917         }
9918         return ret;
9919 #endif
9920 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9921     case TARGET_NR_clock_adjtime:
9922         {
9923             struct timex htx, *phtx = &htx;
9924 
9925             if (target_to_host_timex(phtx, arg2) != 0) {
9926                 return -TARGET_EFAULT;
9927             }
9928             ret = get_errno(clock_adjtime(arg1, phtx));
9929             if (!is_error(ret) && phtx) {
9930                 if (host_to_target_timex(arg2, phtx) != 0) {
9931                     return -TARGET_EFAULT;
9932                 }
9933             }
9934         }
9935         return ret;
9936 #endif
9937     case TARGET_NR_getpgid:
9938         return get_errno(getpgid(arg1));
9939     case TARGET_NR_fchdir:
9940         return get_errno(fchdir(arg1));
9941     case TARGET_NR_personality:
9942         return get_errno(personality(arg1));
9943 #ifdef TARGET_NR__llseek /* Not on alpha */
9944     case TARGET_NR__llseek:
9945         {
9946             int64_t res;
9947 #if !defined(__NR_llseek)
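            /*
             * No host _llseek: combine the two 32-bit guest halves into a
             * single 64-bit offset and fall back to plain lseek().
             */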
9948             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9949             if (res == -1) {
9950                 ret = get_errno(res);
9951             } else {
9952                 ret = 0;
9953             }
9954 #else
9955             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9956 #endif
9957             if ((ret == 0) && put_user_s64(res, arg4)) {
9958                 return -TARGET_EFAULT;
9959             }
9960         }
9961         return ret;
9962 #endif
9963 #ifdef TARGET_NR_getdents
9964     case TARGET_NR_getdents:
9965 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9966 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
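        /*
         * With a 32-bit guest on a 64-bit host the d_ino/d_off fields differ
         * in width, so each host record is repacked into a separate guest
         * buffer instead of being byte-swapped in place.
         */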
9967         {
9968             struct target_dirent *target_dirp;
9969             struct linux_dirent *dirp;
9970             abi_long count = arg3;
9971 
9972             dirp = g_try_malloc(count);
9973             if (!dirp) {
9974                 return -TARGET_ENOMEM;
9975             }
9976 
9977             ret = get_errno(sys_getdents(arg1, dirp, count));
9978             if (!is_error(ret)) {
9979                 struct linux_dirent *de;
9980                 struct target_dirent *tde;
9981                 int len = ret;
9982                 int reclen, treclen;
9983                 int count1, tnamelen;
9984
9985                 count1 = 0;
9986                 de = dirp;
9987                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9988                     return -TARGET_EFAULT;
9989                 tde = target_dirp;
9990                 while (len > 0) {
9991                     reclen = de->d_reclen;
9992                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9993                     assert(tnamelen >= 0);
9994                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9995                     assert(count1 + treclen <= count);
9996                     tde->d_reclen = tswap16(treclen);
9997                     tde->d_ino = tswapal(de->d_ino);
9998                     tde->d_off = tswapal(de->d_off);
9999                     memcpy(tde->d_name, de->d_name, tnamelen);
10000                     de = (struct linux_dirent *)((char *)de + reclen);
10001                     len -= reclen;
10002                     tde = (struct target_dirent *)((char *)tde + treclen);
10003                     count1 += treclen;
10004                 }
10005                 ret = count1;
10006                 unlock_user(target_dirp, arg2, ret);
10007             }
10008             g_free(dirp);
10009         }
10010 #else
10011         {
10012             struct linux_dirent *dirp;
10013             abi_long count = arg3;
10014 
10015             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10016                 return -TARGET_EFAULT;
10017             ret = get_errno(sys_getdents(arg1, dirp, count));
10018             if (!is_error(ret)) {
10019                 struct linux_dirent *de;
10020                 int len = ret;
10021                 int reclen;
10022                 de = dirp;
10023                 while (len > 0) {
10024                     reclen = de->d_reclen;
10025                     if (reclen > len)
10026                         break;
10027                     de->d_reclen = tswap16(reclen);
10028                     tswapls(&de->d_ino);
10029                     tswapls(&de->d_off);
10030                     de = (struct linux_dirent *)((char *)de + reclen);
10031                     len -= reclen;
10032                 }
10033             }
10034             unlock_user(dirp, arg2, ret);
10035         }
10036 #endif
10037 #else
10038         /* Implement getdents in terms of getdents64 */
10039         {
10040             struct linux_dirent64 *dirp;
10041             abi_long count = arg3;
10042 
10043             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10044             if (!dirp) {
10045                 return -TARGET_EFAULT;
10046             }
10047             ret = get_errno(sys_getdents64(arg1, dirp, count));
10048             if (!is_error(ret)) {
10049                 /* Convert the dirent64 structs to target dirent.  We do this
10050                  * in-place, since we can guarantee that a target_dirent is no
10051                  * larger than a dirent64; however this means we have to be
10052                  * careful to read everything before writing in the new format.
10053                  */
10054                 struct linux_dirent64 *de;
10055                 struct target_dirent *tde;
10056                 int len = ret;
10057                 int tlen = 0;
10058 
10059                 de = dirp;
10060                 tde = (struct target_dirent *)dirp;
10061                 while (len > 0) {
10062                     int namelen, treclen;
10063                     int reclen = de->d_reclen;
10064                     uint64_t ino = de->d_ino;
10065                     int64_t off = de->d_off;
10066                     uint8_t type = de->d_type;
10067 
10068                     namelen = strlen(de->d_name);
10069                     treclen = offsetof(struct target_dirent, d_name)
10070                         + namelen + 2;
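                    /*
                     * The +2 reserves room for the trailing NUL plus the
                     * d_type byte stored at the very end of the record.
                     */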
10071                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10072 
10073                     memmove(tde->d_name, de->d_name, namelen + 1);
10074                     tde->d_ino = tswapal(ino);
10075                     tde->d_off = tswapal(off);
10076                     tde->d_reclen = tswap16(treclen);
10077                     /* The target_dirent type is in what was formerly a padding
10078                      * byte at the end of the structure:
10079                      */
10080                     *(((char *)tde) + treclen - 1) = type;
10081 
10082                     de = (struct linux_dirent64 *)((char *)de + reclen);
10083                     tde = (struct target_dirent *)((char *)tde + treclen);
10084                     len -= reclen;
10085                     tlen += treclen;
10086                 }
10087                 ret = tlen;
10088             }
10089             unlock_user(dirp, arg2, ret);
10090         }
10091 #endif
10092         return ret;
10093 #endif /* TARGET_NR_getdents */
10094 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10095     case TARGET_NR_getdents64:
10096         {
10097             struct linux_dirent64 *dirp;
10098             abi_long count = arg3;
10099             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10100                 return -TARGET_EFAULT;
10101             ret = get_errno(sys_getdents64(arg1, dirp, count));
10102             if (!is_error(ret)) {
10103                 struct linux_dirent64 *de;
10104                 int len = ret;
10105                 int reclen;
10106                 de = dirp;
10107                 while (len > 0) {
10108                     reclen = de->d_reclen;
10109                     if (reclen > len)
10110                         break;
10111                     de->d_reclen = tswap16(reclen);
10112                     tswap64s((uint64_t *)&de->d_ino);
10113                     tswap64s((uint64_t *)&de->d_off);
10114                     de = (struct linux_dirent64 *)((char *)de + reclen);
10115                     len -= reclen;
10116                 }
10117             }
10118             unlock_user(dirp, arg2, ret);
10119         }
10120         return ret;
10121 #endif /* TARGET_NR_getdents64 */
10122 #if defined(TARGET_NR__newselect)
10123     case TARGET_NR__newselect:
10124         return do_select(arg1, arg2, arg3, arg4, arg5);
10125 #endif
10126 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10127 # ifdef TARGET_NR_poll
10128     case TARGET_NR_poll:
10129 # endif
10130 # ifdef TARGET_NR_ppoll
10131     case TARGET_NR_ppoll:
10132 # endif
10133         {
10134             struct target_pollfd *target_pfd;
10135             unsigned int nfds = arg2;
10136             struct pollfd *pfd;
10137             unsigned int i;
10138 
10139             pfd = NULL;
10140             target_pfd = NULL;
10141             if (nfds) {
10142                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10143                     return -TARGET_EINVAL;
10144                 }
10145 
10146                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10147                                        sizeof(struct target_pollfd) * nfds, 1);
10148                 if (!target_pfd) {
10149                     return -TARGET_EFAULT;
10150                 }
10151 
10152                 pfd = alloca(sizeof(struct pollfd) * nfds);
10153                 for (i = 0; i < nfds; i++) {
10154                     pfd[i].fd = tswap32(target_pfd[i].fd);
10155                     pfd[i].events = tswap16(target_pfd[i].events);
10156                 }
10157             }
10158 
10159             switch (num) {
10160 # ifdef TARGET_NR_ppoll
10161             case TARGET_NR_ppoll:
10162             {
10163                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10164                 target_sigset_t *target_set;
10165                 sigset_t _set, *set = &_set;
10166 
10167                 if (arg3) {
10168                     if (target_to_host_timespec(timeout_ts, arg3)) {
10169                         unlock_user(target_pfd, arg1, 0);
10170                         return -TARGET_EFAULT;
10171                     }
10172                 } else {
10173                     timeout_ts = NULL;
10174                 }
10175 
10176                 if (arg4) {
10177                     if (arg5 != sizeof(target_sigset_t)) {
10178                         unlock_user(target_pfd, arg1, 0);
10179                         return -TARGET_EINVAL;
10180                     }
10181 
10182                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10183                     if (!target_set) {
10184                         unlock_user(target_pfd, arg1, 0);
10185                         return -TARGET_EFAULT;
10186                     }
10187                     target_to_host_sigset(set, target_set);
10188                 } else {
10189                     set = NULL;
10190                 }
10191 
10192                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10193                                            set, SIGSET_T_SIZE));
10194 
10195                 if (!is_error(ret) && arg3) {
10196                     host_to_target_timespec(arg3, timeout_ts);
10197                 }
10198                 if (arg4) {
10199                     unlock_user(target_set, arg4, 0);
10200                 }
10201                 break;
10202             }
10203 # endif
10204 # ifdef TARGET_NR_poll
10205             case TARGET_NR_poll:
10206             {
10207                 struct timespec ts, *pts;
10208 
10209                 if (arg3 >= 0) {
10210                     /* Convert ms to secs, ns */
10211                     ts.tv_sec = arg3 / 1000;
10212                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10213                     pts = &ts;
10214                 } else {
10215                     /* A negative poll() timeout means wait indefinitely. */
10216                     pts = NULL;
10217                 }
10218                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10219                 break;
10220             }
10221 # endif
10222             default:
10223                 g_assert_not_reached();
10224             }
10225 
10226             if (!is_error(ret)) {
10227                 for (i = 0; i < nfds; i++) {
10228                     target_pfd[i].revents = tswap16(pfd[i].revents);
10229                 }
10230             }
10231             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10232         }
10233         return ret;
10234 #endif
10235     case TARGET_NR_flock:
10236         /* NOTE: the flock operation constants are the same on every
10237            Linux platform, so the value can be passed through unchanged. */
10238         return get_errno(safe_flock(arg1, arg2));
10239     case TARGET_NR_readv:
10240         {
10241             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10242             if (vec != NULL) {
10243                 ret = get_errno(safe_readv(arg1, vec, arg3));
10244                 unlock_iovec(vec, arg2, arg3, 1);
10245             } else {
10246                 ret = -host_to_target_errno(errno);
10247             }
10248         }
10249         return ret;
10250     case TARGET_NR_writev:
10251         {
10252             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10253             if (vec != NULL) {
10254                 ret = get_errno(safe_writev(arg1, vec, arg3));
10255                 unlock_iovec(vec, arg2, arg3, 0);
10256             } else {
10257                 ret = -host_to_target_errno(errno);
10258             }
10259         }
10260         return ret;
10261 #if defined(TARGET_NR_preadv)
10262     case TARGET_NR_preadv:
10263         {
10264             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10265             if (vec != NULL) {
10266                 unsigned long low, high;
10267 
10268                 target_to_host_low_high(arg4, arg5, &low, &high);
10269                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10270                 unlock_iovec(vec, arg2, arg3, 1);
10271             } else {
10272                 ret = -host_to_target_errno(errno);
10273             }
10274         }
10275         return ret;
10276 #endif
10277 #if defined(TARGET_NR_pwritev)
10278     case TARGET_NR_pwritev:
10279         {
10280             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10281             if (vec != NULL) {
10282                 unsigned long low, high;
10283 
10284                 target_to_host_low_high(arg4, arg5, &low, &high);
10285                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10286                 unlock_iovec(vec, arg2, arg3, 0);
10287             } else {
10288                 ret = -host_to_target_errno(errno);
10289             }
10290         }
10291         return ret;
10292 #endif
10293     case TARGET_NR_getsid:
10294         return get_errno(getsid(arg1));
10295 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10296     case TARGET_NR_fdatasync:
10297         return get_errno(fdatasync(arg1));
10298 #endif
10299 #ifdef TARGET_NR__sysctl
10300     case TARGET_NR__sysctl:
10301         /* We don't implement this, but ENOTDIR is always a safe
10302            return value. */
10303         return -TARGET_ENOTDIR;
10304 #endif
10305     case TARGET_NR_sched_getaffinity:
10306         {
10307             unsigned int mask_size;
10308             unsigned long *mask;
10309 
10310             /*
10311              * sched_getaffinity needs a mask sized in multiples of ulong, so we
10312              * must take care of mismatches between target and host ulong sizes.
10313              */
10314             if (arg2 & (sizeof(abi_ulong) - 1)) {
10315                 return -TARGET_EINVAL;
10316             }
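            /*
             * Round the guest-supplied length up to a whole number of host
             * unsigned longs; the host kernel rejects lengths that are not
             * a multiple of sizeof(unsigned long).
             */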
10317             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10318 
10319             mask = alloca(mask_size);
10320             memset(mask, 0, mask_size);
10321             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10322 
10323             if (!is_error(ret)) {
10324                 if (ret > arg2) {
10325                     /* More data returned than the caller's buffer will fit.
10326                      * This only happens if sizeof(abi_long) < sizeof(long)
10327                      * and the caller passed us a buffer holding an odd number
10328                      * of abi_longs. If the host kernel is actually using the
10329                      * extra 4 bytes then fail EINVAL; otherwise we can just
10330                      * ignore them and only copy the interesting part.
10331                      */
10332                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10333                     if (numcpus > arg2 * 8) {
10334                         return -TARGET_EINVAL;
10335                     }
10336                     ret = arg2;
10337                 }
10338 
10339                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10340                     return -TARGET_EFAULT;
10341                 }
10342             }
10343         }
10344         return ret;
10345     case TARGET_NR_sched_setaffinity:
10346         {
10347             unsigned int mask_size;
10348             unsigned long *mask;
10349 
10350             /*
10351              * sched_setaffinity needs a mask sized in multiples of ulong, so we
10352              * must take care of mismatches between target and host ulong sizes.
10353              */
10354             if (arg2 & (sizeof(abi_ulong) - 1)) {
10355                 return -TARGET_EINVAL;
10356             }
10357             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10358             mask = alloca(mask_size);
10359 
10360             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10361             if (ret) {
10362                 return ret;
10363             }
10364 
10365             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10366         }
10367     case TARGET_NR_getcpu:
10368         {
10369             unsigned cpu, node;
10370             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10371                                        arg2 ? &node : NULL,
10372                                        NULL));
10373             if (is_error(ret)) {
10374                 return ret;
10375             }
10376             if (arg1 && put_user_u32(cpu, arg1)) {
10377                 return -TARGET_EFAULT;
10378             }
10379             if (arg2 && put_user_u32(node, arg2)) {
10380                 return -TARGET_EFAULT;
10381             }
10382         }
10383         return ret;
10384     case TARGET_NR_sched_setparam:
10385         {
10386             struct sched_param *target_schp;
10387             struct sched_param schp;
10388 
10389             if (arg2 == 0) {
10390                 return -TARGET_EINVAL;
10391             }
10392             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10393                 return -TARGET_EFAULT;
10394             schp.sched_priority = tswap32(target_schp->sched_priority);
10395             unlock_user_struct(target_schp, arg2, 0);
10396             return get_errno(sched_setparam(arg1, &schp));
10397         }
10398     case TARGET_NR_sched_getparam:
10399         {
10400             struct sched_param *target_schp;
10401             struct sched_param schp;
10402 
10403             if (arg2 == 0) {
10404                 return -TARGET_EINVAL;
10405             }
10406             ret = get_errno(sched_getparam(arg1, &schp));
10407             if (!is_error(ret)) {
10408                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10409                     return -TARGET_EFAULT;
10410                 target_schp->sched_priority = tswap32(schp.sched_priority);
10411                 unlock_user_struct(target_schp, arg2, 1);
10412             }
10413         }
10414         return ret;
10415     case TARGET_NR_sched_setscheduler:
10416         {
10417             struct sched_param *target_schp;
10418             struct sched_param schp;
10419             if (arg3 == 0) {
10420                 return -TARGET_EINVAL;
10421             }
10422             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10423                 return -TARGET_EFAULT;
10424             schp.sched_priority = tswap32(target_schp->sched_priority);
10425             unlock_user_struct(target_schp, arg3, 0);
10426             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10427         }
10428     case TARGET_NR_sched_getscheduler:
10429         return get_errno(sched_getscheduler(arg1));
10430     case TARGET_NR_sched_yield:
10431         return get_errno(sched_yield());
10432     case TARGET_NR_sched_get_priority_max:
10433         return get_errno(sched_get_priority_max(arg1));
10434     case TARGET_NR_sched_get_priority_min:
10435         return get_errno(sched_get_priority_min(arg1));
10436 #ifdef TARGET_NR_sched_rr_get_interval
10437     case TARGET_NR_sched_rr_get_interval:
10438         {
10439             struct timespec ts;
10440             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10441             if (!is_error(ret)) {
10442                 ret = host_to_target_timespec(arg2, &ts);
10443             }
10444         }
10445         return ret;
10446 #endif
10447 #if defined(TARGET_NR_nanosleep)
10448     case TARGET_NR_nanosleep:
10449         {
10450             struct timespec req, rem;
10451             target_to_host_timespec(&req, arg1);
10452             ret = get_errno(safe_nanosleep(&req, &rem));
10453             if (is_error(ret) && arg2) {
10454                 host_to_target_timespec(arg2, &rem);
10455             }
10456         }
10457         return ret;
10458 #endif
10459     case TARGET_NR_prctl:
10460         switch (arg1) {
10461         case PR_GET_PDEATHSIG:
10462         {
10463             int deathsig;
10464             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10465             if (!is_error(ret) && arg2
10466                 && put_user_ual(deathsig, arg2)) {
10467                 return -TARGET_EFAULT;
10468             }
10469             return ret;
10470         }
10471 #ifdef PR_GET_NAME
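        /*
         * The kernel limits the task name (comm) to 16 bytes including the
         * trailing NUL, hence the fixed 16-byte buffers used below.
         */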
10472         case PR_GET_NAME:
10473         {
10474             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10475             if (!name) {
10476                 return -TARGET_EFAULT;
10477             }
10478             ret = get_errno(prctl(arg1, (unsigned long)name,
10479                                   arg3, arg4, arg5));
10480             unlock_user(name, arg2, 16);
10481             return ret;
10482         }
10483         case PR_SET_NAME:
10484         {
10485             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10486             if (!name) {
10487                 return -TARGET_EFAULT;
10488             }
10489             ret = get_errno(prctl(arg1, (unsigned long)name,
10490                                   arg3, arg4, arg5));
10491             unlock_user(name, arg2, 0);
10492             return ret;
10493         }
10494 #endif
10495 #ifdef TARGET_MIPS
10496         case TARGET_PR_GET_FP_MODE:
10497         {
10498             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10499             ret = 0;
10500             if (env->CP0_Status & (1 << CP0St_FR)) {
10501                 ret |= TARGET_PR_FP_MODE_FR;
10502             }
10503             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10504                 ret |= TARGET_PR_FP_MODE_FRE;
10505             }
10506             return ret;
10507         }
10508         case TARGET_PR_SET_FP_MODE:
10509         {
10510             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10511             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10512             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10513             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10514             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10515 
10516             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10517                                             TARGET_PR_FP_MODE_FRE;
10518 
10519             /* If nothing to change, return right away, successfully.  */
10520             if (old_fr == new_fr && old_fre == new_fre) {
10521                 return 0;
10522             }
10523             /* Check the value is valid */
10524             if (arg2 & ~known_bits) {
10525                 return -TARGET_EOPNOTSUPP;
10526             }
10527             /* Setting FRE without FR is not supported.  */
10528             if (new_fre && !new_fr) {
10529                 return -TARGET_EOPNOTSUPP;
10530             }
10531             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10532                 /* FR1 is not supported */
10533                 return -TARGET_EOPNOTSUPP;
10534             }
10535             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10536                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10537                 /* cannot set FR=0 */
10538                 return -TARGET_EOPNOTSUPP;
10539             }
10540             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10541                 /* Cannot set FRE=1 */
10542                 return -TARGET_EOPNOTSUPP;
10543             }
10544 
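            /*
             * When flipping FR, repack the FPU registers: with FR=0 the
             * odd-numbered single-precision registers live in the upper
             * halves of the even-numbered doubles, so move them across.
             */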
10545             int i;
10546             fpr_t *fpr = env->active_fpu.fpr;
10547             for (i = 0; i < 32 ; i += 2) {
10548                 if (!old_fr && new_fr) {
10549                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10550                 } else if (old_fr && !new_fr) {
10551                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10552                 }
10553             }
10554 
10555             if (new_fr) {
10556                 env->CP0_Status |= (1 << CP0St_FR);
10557                 env->hflags |= MIPS_HFLAG_F64;
10558             } else {
10559                 env->CP0_Status &= ~(1 << CP0St_FR);
10560                 env->hflags &= ~MIPS_HFLAG_F64;
10561             }
10562             if (new_fre) {
10563                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10564                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10565                     env->hflags |= MIPS_HFLAG_FRE;
10566                 }
10567             } else {
10568                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10569                 env->hflags &= ~MIPS_HFLAG_FRE;
10570             }
10571 
10572             return 0;
10573         }
10574 #endif /* MIPS */
10575 #ifdef TARGET_AARCH64
10576         case TARGET_PR_SVE_SET_VL:
10577             /*
10578              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10579              * PR_SVE_VL_INHERIT.  Note the kernel definition
10580              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10581              * even though the current architectural maximum is VQ=16.
10582              */
10583             ret = -TARGET_EINVAL;
10584             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10585                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10586                 CPUARMState *env = cpu_env;
10587                 ARMCPU *cpu = env_archcpu(env);
10588                 uint32_t vq, old_vq;
10589 
10590                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10591                 vq = MAX(arg2 / 16, 1);
10592                 vq = MIN(vq, cpu->sve_max_vq);
10593 
10594                 if (vq < old_vq) {
10595                     aarch64_sve_narrow_vq(env, vq);
10596                 }
10597                 env->vfp.zcr_el[1] = vq - 1;
10598                 arm_rebuild_hflags(env);
10599                 ret = vq * 16;
10600             }
10601             return ret;
10602         case TARGET_PR_SVE_GET_VL:
10603             ret = -TARGET_EINVAL;
10604             {
10605                 ARMCPU *cpu = env_archcpu(cpu_env);
10606                 if (cpu_isar_feature(aa64_sve, cpu)) {
10607                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10608                 }
10609             }
10610             return ret;
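              /*
               * PR_PAC_RESET_KEYS: regenerate the selected pointer-authentication
               * keys with fresh random data.  arg2 == 0 selects all keys; unknown
               * key bits or extra arguments are rejected with EINVAL.
               */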
10611         case TARGET_PR_PAC_RESET_KEYS:
10612             {
10613                 CPUARMState *env = cpu_env;
10614                 ARMCPU *cpu = env_archcpu(env);
10615 
10616                 if (arg3 || arg4 || arg5) {
10617                     return -TARGET_EINVAL;
10618                 }
10619                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10620                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10621                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10622                                TARGET_PR_PAC_APGAKEY);
10623                     int ret = 0;
10624                     Error *err = NULL;
10625 
10626                     if (arg2 == 0) {
10627                         arg2 = all;
10628                     } else if (arg2 & ~all) {
10629                         return -TARGET_EINVAL;
10630                     }
10631                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10632                         ret |= qemu_guest_getrandom(&env->keys.apia,
10633                                                     sizeof(ARMPACKey), &err);
10634                     }
10635                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10636                         ret |= qemu_guest_getrandom(&env->keys.apib,
10637                                                     sizeof(ARMPACKey), &err);
10638                     }
10639                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10640                         ret |= qemu_guest_getrandom(&env->keys.apda,
10641                                                     sizeof(ARMPACKey), &err);
10642                     }
10643                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10644                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10645                                                     sizeof(ARMPACKey), &err);
10646                     }
10647                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10648                         ret |= qemu_guest_getrandom(&env->keys.apga,
10649                                                     sizeof(ARMPACKey), &err);
10650                     }
10651                     if (ret != 0) {
10652                         /*
10653                          * Some unknown failure in the crypto.  The best
10654                          * we can do is log it and fail the syscall.
10655                          * The real syscall cannot fail this way.
10656                          */
10657                         qemu_log_mask(LOG_UNIMP,
10658                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10659                                       error_get_pretty(err));
10660                         error_free(err);
10661                         return -TARGET_EIO;
10662                     }
10663                     return 0;
10664                 }
10665             }
10666             return -TARGET_EINVAL;
10667 #endif /* AARCH64 */
10668         case PR_GET_SECCOMP:
10669         case PR_SET_SECCOMP:
10670             /* Refuse seccomp so the guest cannot filter out syscalls that
10671              * QEMU itself needs. */
10672             return -TARGET_EINVAL;
10673         default:
10674             /* Most prctl options have no pointer arguments */
10675             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10676         }
10677         break;
10678 #ifdef TARGET_NR_arch_prctl
10679     case TARGET_NR_arch_prctl:
10680         return do_arch_prctl(cpu_env, arg1, arg2);
10681 #endif
10682 #ifdef TARGET_NR_pread64
10683     case TARGET_NR_pread64:
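              /*
               * On 32-bit ABIs that pass 64-bit values in aligned register pairs,
               * a padding slot is inserted and the offset starts one argument
               * later, so shift the arguments down first.
               */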
10684         if (regpairs_aligned(cpu_env, num)) {
10685             arg4 = arg5;
10686             arg5 = arg6;
10687         }
10688         if (arg2 == 0 && arg3 == 0) {
10689             /* Special-case NULL buffer and zero length, which should succeed */
10690             p = 0;
10691         } else {
10692             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10693             if (!p) {
10694                 return -TARGET_EFAULT;
10695             }
10696         }
10697         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10698         unlock_user(p, arg2, ret);
10699         return ret;
10700     case TARGET_NR_pwrite64:
10701         if (regpairs_aligned(cpu_env, num)) {
10702             arg4 = arg5;
10703             arg5 = arg6;
10704         }
10705         if (arg2 == 0 && arg3 == 0) {
10706             /* Special-case NULL buffer and zero length, which should succeed */
10707             p = 0;
10708         } else {
10709             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10710             if (!p) {
10711                 return -TARGET_EFAULT;
10712             }
10713         }
10714         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10715         unlock_user(p, arg2, 0);
10716         return ret;
10717 #endif
10718     case TARGET_NR_getcwd:
10719         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10720             return -TARGET_EFAULT;
10721         ret = get_errno(sys_getcwd1(p, arg2));
10722         unlock_user(p, arg1, ret);
10723         return ret;
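          /*
           * capget and capset share one handler: convert the user_cap_header,
           * then one or two user_cap_data structs depending on the capability
           * version, make the host call, and copy the updated version (and,
           * for capget, the data) back to the guest.
           */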
10724     case TARGET_NR_capget:
10725     case TARGET_NR_capset:
10726     {
10727         struct target_user_cap_header *target_header;
10728         struct target_user_cap_data *target_data = NULL;
10729         struct __user_cap_header_struct header;
10730         struct __user_cap_data_struct data[2];
10731         struct __user_cap_data_struct *dataptr = NULL;
10732         int i, target_datalen;
10733         int data_items = 1;
10734 
10735         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10736             return -TARGET_EFAULT;
10737         }
10738         header.version = tswap32(target_header->version);
10739         header.pid = tswap32(target_header->pid);
10740 
10741         if (header.version != _LINUX_CAPABILITY_VERSION) {
10742             /* Versions 2 and up take a pointer to two user_data structs */
10743             data_items = 2;
10744         }
10745 
10746         target_datalen = sizeof(*target_data) * data_items;
10747 
10748         if (arg2) {
10749             if (num == TARGET_NR_capget) {
10750                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10751             } else {
10752                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10753             }
10754             if (!target_data) {
10755                 unlock_user_struct(target_header, arg1, 0);
10756                 return -TARGET_EFAULT;
10757             }
10758 
10759             if (num == TARGET_NR_capset) {
10760                 for (i = 0; i < data_items; i++) {
10761                     data[i].effective = tswap32(target_data[i].effective);
10762                     data[i].permitted = tswap32(target_data[i].permitted);
10763                     data[i].inheritable = tswap32(target_data[i].inheritable);
10764                 }
10765             }
10766 
10767             dataptr = data;
10768         }
10769 
10770         if (num == TARGET_NR_capget) {
10771             ret = get_errno(capget(&header, dataptr));
10772         } else {
10773             ret = get_errno(capset(&header, dataptr));
10774         }
10775 
10776         /* The kernel always updates version for both capget and capset */
10777         target_header->version = tswap32(header.version);
10778         unlock_user_struct(target_header, arg1, 1);
10779 
10780         if (arg2) {
10781             if (num == TARGET_NR_capget) {
10782                 for (i = 0; i < data_items; i++) {
10783                     target_data[i].effective = tswap32(data[i].effective);
10784                     target_data[i].permitted = tswap32(data[i].permitted);
10785                     target_data[i].inheritable = tswap32(data[i].inheritable);
10786                 }
10787                 unlock_user(target_data, arg2, target_datalen);
10788             } else {
10789                 unlock_user(target_data, arg2, 0);
10790             }
10791         }
10792         return ret;
10793     }
10794     case TARGET_NR_sigaltstack:
10795         return do_sigaltstack(arg1, arg2,
10796                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10797 
10798 #ifdef CONFIG_SENDFILE
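          /*
           * sendfile and sendfile64 differ only in the width of the user-supplied
           * offset: copy it in if present, call the host sendfile(), and write
           * the updated offset back on success.
           */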
10799 #ifdef TARGET_NR_sendfile
10800     case TARGET_NR_sendfile:
10801     {
10802         off_t *offp = NULL;
10803         off_t off;
10804         if (arg3) {
10805             ret = get_user_sal(off, arg3);
10806             if (is_error(ret)) {
10807                 return ret;
10808             }
10809             offp = &off;
10810         }
10811         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10812         if (!is_error(ret) && arg3) {
10813             abi_long ret2 = put_user_sal(off, arg3);
10814             if (is_error(ret2)) {
10815                 ret = ret2;
10816             }
10817         }
10818         return ret;
10819     }
10820 #endif
10821 #ifdef TARGET_NR_sendfile64
10822     case TARGET_NR_sendfile64:
10823     {
10824         off_t *offp = NULL;
10825         off_t off;
10826         if (arg3) {
10827             ret = get_user_s64(off, arg3);
10828             if (is_error(ret)) {
10829                 return ret;
10830             }
10831             offp = &off;
10832         }
10833         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10834         if (!is_error(ret) && arg3) {
10835             abi_long ret2 = put_user_s64(off, arg3);
10836             if (is_error(ret2)) {
10837                 ret = ret2;
10838             }
10839         }
10840         return ret;
10841     }
10842 #endif
10843 #endif
10844 #ifdef TARGET_NR_vfork
10845     case TARGET_NR_vfork:
10846         return get_errno(do_fork(cpu_env,
10847                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10848                          0, 0, 0, 0));
10849 #endif
10850 #ifdef TARGET_NR_ugetrlimit
10851     case TARGET_NR_ugetrlimit:
10852     {
10853         struct rlimit rlim;
10854         int resource = target_to_host_resource(arg1);
10855         ret = get_errno(getrlimit(resource, &rlim));
10856         if (!is_error(ret)) {
10857             struct target_rlimit *target_rlim;
10858             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10859                 return -TARGET_EFAULT;
10860             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10861             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10862             unlock_user_struct(target_rlim, arg2, 1);
10863         }
10864         return ret;
10865     }
10866 #endif
10867 #ifdef TARGET_NR_truncate64
10868     case TARGET_NR_truncate64:
10869         if (!(p = lock_user_string(arg1)))
10870             return -TARGET_EFAULT;
10871         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10872         unlock_user(p, arg1, 0);
10873         return ret;
10874 #endif
10875 #ifdef TARGET_NR_ftruncate64
10876     case TARGET_NR_ftruncate64:
10877         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10878 #endif
10879 #ifdef TARGET_NR_stat64
10880     case TARGET_NR_stat64:
10881         if (!(p = lock_user_string(arg1))) {
10882             return -TARGET_EFAULT;
10883         }
10884         ret = get_errno(stat(path(p), &st));
10885         unlock_user(p, arg1, 0);
10886         if (!is_error(ret))
10887             ret = host_to_target_stat64(cpu_env, arg2, &st);
10888         return ret;
10889 #endif
10890 #ifdef TARGET_NR_lstat64
10891     case TARGET_NR_lstat64:
10892         if (!(p = lock_user_string(arg1))) {
10893             return -TARGET_EFAULT;
10894         }
10895         ret = get_errno(lstat(path(p), &st));
10896         unlock_user(p, arg1, 0);
10897         if (!is_error(ret))
10898             ret = host_to_target_stat64(cpu_env, arg2, &st);
10899         return ret;
10900 #endif
10901 #ifdef TARGET_NR_fstat64
10902     case TARGET_NR_fstat64:
10903         ret = get_errno(fstat(arg1, &st));
10904         if (!is_error(ret))
10905             ret = host_to_target_stat64(cpu_env, arg2, &st);
10906         return ret;
10907 #endif
10908 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10909 #ifdef TARGET_NR_fstatat64
10910     case TARGET_NR_fstatat64:
10911 #endif
10912 #ifdef TARGET_NR_newfstatat
10913     case TARGET_NR_newfstatat:
10914 #endif
10915         if (!(p = lock_user_string(arg2))) {
10916             return -TARGET_EFAULT;
10917         }
10918         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10919         unlock_user(p, arg2, 0);
10920         if (!is_error(ret))
10921             ret = host_to_target_stat64(cpu_env, arg3, &st);
10922         return ret;
10923 #endif
10924 #if defined(TARGET_NR_statx)
10925     case TARGET_NR_statx:
10926         {
10927             struct target_statx *target_stx;
10928             int dirfd = arg1;
10929             int flags = arg3;
10930 
10931             p = lock_user_string(arg2);
10932             if (p == NULL) {
10933                 return -TARGET_EFAULT;
10934             }
10935 #if defined(__NR_statx)
10936             {
10937                 /*
10938                  * It is assumed that struct statx is architecture independent.
10939                  */
10940                 struct target_statx host_stx;
10941                 int mask = arg4;
10942 
10943                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10944                 if (!is_error(ret)) {
10945                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10946                         unlock_user(p, arg2, 0);
10947                         return -TARGET_EFAULT;
10948                     }
10949                 }
10950 
10951                 if (ret != -TARGET_ENOSYS) {
10952                     unlock_user(p, arg2, 0);
10953                     return ret;
10954                 }
10955             }
10956 #endif
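                  /*
                   * Fall back to fstatat() when the host statx() is unavailable
                   * or returned ENOSYS, and synthesize the statx fields we can.
                   */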
10957             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10958             unlock_user(p, arg2, 0);
10959 
10960             if (!is_error(ret)) {
10961                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10962                     return -TARGET_EFAULT;
10963                 }
10964                 memset(target_stx, 0, sizeof(*target_stx));
10965                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10966                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10967                 __put_user(st.st_ino, &target_stx->stx_ino);
10968                 __put_user(st.st_mode, &target_stx->stx_mode);
10969                 __put_user(st.st_uid, &target_stx->stx_uid);
10970                 __put_user(st.st_gid, &target_stx->stx_gid);
10971                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10972                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10973                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10974                 __put_user(st.st_size, &target_stx->stx_size);
10975                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10976                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10977                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10978                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10979                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10980                 unlock_user_struct(target_stx, arg5, 1);
10981             }
10982         }
10983         return ret;
10984 #endif
10985 #ifdef TARGET_NR_lchown
10986     case TARGET_NR_lchown:
10987         if (!(p = lock_user_string(arg1)))
10988             return -TARGET_EFAULT;
10989         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10990         unlock_user(p, arg1, 0);
10991         return ret;
10992 #endif
10993 #ifdef TARGET_NR_getuid
10994     case TARGET_NR_getuid:
10995         return get_errno(high2lowuid(getuid()));
10996 #endif
10997 #ifdef TARGET_NR_getgid
10998     case TARGET_NR_getgid:
10999         return get_errno(high2lowgid(getgid()));
11000 #endif
11001 #ifdef TARGET_NR_geteuid
11002     case TARGET_NR_geteuid:
11003         return get_errno(high2lowuid(geteuid()));
11004 #endif
11005 #ifdef TARGET_NR_getegid
11006     case TARGET_NR_getegid:
11007         return get_errno(high2lowgid(getegid()));
11008 #endif
11009     case TARGET_NR_setreuid:
11010         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11011     case TARGET_NR_setregid:
11012         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
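          /*
           * getgroups/setgroups: convert each gid between the target's
           * (possibly 16-bit) representation and the host gid_t, byte-swapping
           * as needed.
           */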
11013     case TARGET_NR_getgroups:
11014         {
11015             int gidsetsize = arg1;
11016             target_id *target_grouplist;
11017             gid_t *grouplist;
11018             int i;
11019 
11020             grouplist = alloca(gidsetsize * sizeof(gid_t));
11021             ret = get_errno(getgroups(gidsetsize, grouplist));
11022             if (gidsetsize == 0)
11023                 return ret;
11024             if (!is_error(ret)) {
11025                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11026                 if (!target_grouplist)
11027                     return -TARGET_EFAULT;
11028                 for (i = 0; i < ret; i++)
11029                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11030                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11031             }
11032         }
11033         return ret;
11034     case TARGET_NR_setgroups:
11035         {
11036             int gidsetsize = arg1;
11037             target_id *target_grouplist;
11038             gid_t *grouplist = NULL;
11039             int i;
11040             if (gidsetsize) {
11041                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11042                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11043                 if (!target_grouplist) {
11044                     return -TARGET_EFAULT;
11045                 }
11046                 for (i = 0; i < gidsetsize; i++) {
11047                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11048                 }
11049                 unlock_user(target_grouplist, arg2, 0);
11050             }
11051             return get_errno(setgroups(gidsetsize, grouplist));
11052         }
11053     case TARGET_NR_fchown:
11054         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11055 #if defined(TARGET_NR_fchownat)
11056     case TARGET_NR_fchownat:
11057         if (!(p = lock_user_string(arg2)))
11058             return -TARGET_EFAULT;
11059         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11060                                  low2highgid(arg4), arg5));
11061         unlock_user(p, arg2, 0);
11062         return ret;
11063 #endif
11064 #ifdef TARGET_NR_setresuid
11065     case TARGET_NR_setresuid:
11066         return get_errno(sys_setresuid(low2highuid(arg1),
11067                                        low2highuid(arg2),
11068                                        low2highuid(arg3)));
11069 #endif
11070 #ifdef TARGET_NR_getresuid
11071     case TARGET_NR_getresuid:
11072         {
11073             uid_t ruid, euid, suid;
11074             ret = get_errno(getresuid(&ruid, &euid, &suid));
11075             if (!is_error(ret)) {
11076                 if (put_user_id(high2lowuid(ruid), arg1)
11077                     || put_user_id(high2lowuid(euid), arg2)
11078                     || put_user_id(high2lowuid(suid), arg3))
11079                     return -TARGET_EFAULT;
11080             }
11081         }
11082         return ret;
11083 #endif
11084 #ifdef TARGET_NR_getresgid
11085     case TARGET_NR_setresgid:
11086         return get_errno(sys_setresgid(low2highgid(arg1),
11087                                        low2highgid(arg2),
11088                                        low2highgid(arg3)));
11089 #endif
11090 #ifdef TARGET_NR_getresgid
11091     case TARGET_NR_getresgid:
11092         {
11093             gid_t rgid, egid, sgid;
11094             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11095             if (!is_error(ret)) {
11096                 if (put_user_id(high2lowgid(rgid), arg1)
11097                     || put_user_id(high2lowgid(egid), arg2)
11098                     || put_user_id(high2lowgid(sgid), arg3))
11099                     return -TARGET_EFAULT;
11100             }
11101         }
11102         return ret;
11103 #endif
11104 #ifdef TARGET_NR_chown
11105     case TARGET_NR_chown:
11106         if (!(p = lock_user_string(arg1)))
11107             return -TARGET_EFAULT;
11108         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11109         unlock_user(p, arg1, 0);
11110         return ret;
11111 #endif
11112     case TARGET_NR_setuid:
11113         return get_errno(sys_setuid(low2highuid(arg1)));
11114     case TARGET_NR_setgid:
11115         return get_errno(sys_setgid(low2highgid(arg1)));
11116     case TARGET_NR_setfsuid:
11117         return get_errno(setfsuid(arg1));
11118     case TARGET_NR_setfsgid:
11119         return get_errno(setfsgid(arg1));
11120 
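          /*
           * The *32 variants of the id syscalls take full 32-bit uids/gids,
           * so no 16-bit high/low conversion is applied.
           */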
11121 #ifdef TARGET_NR_lchown32
11122     case TARGET_NR_lchown32:
11123         if (!(p = lock_user_string(arg1)))
11124             return -TARGET_EFAULT;
11125         ret = get_errno(lchown(p, arg2, arg3));
11126         unlock_user(p, arg1, 0);
11127         return ret;
11128 #endif
11129 #ifdef TARGET_NR_getuid32
11130     case TARGET_NR_getuid32:
11131         return get_errno(getuid());
11132 #endif
11133 
11134 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11135    /* Alpha specific */
11136     case TARGET_NR_getxuid:
11137         {
11138             uid_t euid;
11139             euid = geteuid();
11140             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11141         }
11142         return get_errno(getuid());
11143 #endif
11144 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11145    /* Alpha specific */
11146     case TARGET_NR_getxgid:
11147         {
11148             gid_t egid;
11149             egid = getegid();
11150             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11151         }
11152         return get_errno(getgid());
11153 #endif
11154 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11155     /* Alpha specific */
11156     case TARGET_NR_osf_getsysinfo:
11157         ret = -TARGET_EOPNOTSUPP;
11158         switch (arg1) {
11159           case TARGET_GSI_IEEE_FP_CONTROL:
11160             {
11161                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11162                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11163 
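                      /*
                       * The live exception status bits are kept in the hardware
                       * fpcr, so fold them into the saved software completion
                       * word before returning it to the guest.
                       */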
11164                 swcr &= ~SWCR_STATUS_MASK;
11165                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11166 
11167                 if (put_user_u64(swcr, arg2))
11168                     return -TARGET_EFAULT;
11169                 ret = 0;
11170             }
11171             break;
11172 
11173           /* case GSI_IEEE_STATE_AT_SIGNAL:
11174              -- Not implemented in linux kernel.
11175              case GSI_UACPROC:
11176              -- Retrieves current unaligned access state; not much used.
11177              case GSI_PROC_TYPE:
11178              -- Retrieves implver information; surely not used.
11179              case GSI_GET_HWRPB:
11180              -- Grabs a copy of the HWRPB; surely not used.
11181           */
11182         }
11183         return ret;
11184 #endif
11185 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11186     /* Alpha specific */
11187     case TARGET_NR_osf_setsysinfo:
11188         ret = -TARGET_EOPNOTSUPP;
11189         switch (arg1) {
11190           case TARGET_SSI_IEEE_FP_CONTROL:
11191             {
11192                 uint64_t swcr, fpcr;
11193 
11194                 if (get_user_u64(swcr, arg2)) {
11195                     return -TARGET_EFAULT;
11196                 }
11197 
11198                 /*
11199                  * The kernel calls swcr_update_status to update the
11200                  * status bits from the fpcr at every point that it
11201                  * could be queried.  Therefore, we store the status
11202                  * bits only in FPCR.
11203                  */
11204                 ((CPUAlphaState *)cpu_env)->swcr
11205                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11206 
11207                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11208                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11209                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11210                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11211                 ret = 0;
11212             }
11213             break;
11214 
11215           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11216             {
11217                 uint64_t exc, fpcr, fex;
11218 
11219                 if (get_user_u64(exc, arg2)) {
11220                     return -TARGET_EFAULT;
11221                 }
11222                 exc &= SWCR_STATUS_MASK;
11223                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11224 
11225                 /* Old exceptions are not signaled.  */
11226                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11227                 fex = exc & ~fex;
11228                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11229                 fex &= ((CPUArchState *)cpu_env)->swcr;
11230 
11231                 /* Update the hardware fpcr.  */
11232                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11233                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11234 
11235                 if (fex) {
11236                     int si_code = TARGET_FPE_FLTUNK;
11237                     target_siginfo_t info;
11238 
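                            /*
                             * Pick a si_code for the newly raised exceptions; the
                             * checks below overwrite each other, so when several
                             * bits are set the last match (invalid operation) wins.
                             */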
11239                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11240                         si_code = TARGET_FPE_FLTUND;
11241                     }
11242                     if (fex & SWCR_TRAP_ENABLE_INE) {
11243                         si_code = TARGET_FPE_FLTRES;
11244                     }
11245                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11246                         si_code = TARGET_FPE_FLTUND;
11247                     }
11248                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11249                         si_code = TARGET_FPE_FLTOVF;
11250                     }
11251                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11252                         si_code = TARGET_FPE_FLTDIV;
11253                     }
11254                     if (fex & SWCR_TRAP_ENABLE_INV) {
11255                         si_code = TARGET_FPE_FLTINV;
11256                     }
11257 
11258                     info.si_signo = SIGFPE;
11259                     info.si_errno = 0;
11260                     info.si_code = si_code;
11261                     info._sifields._sigfault._addr
11262                         = ((CPUArchState *)cpu_env)->pc;
11263                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11264                                  QEMU_SI_FAULT, &info);
11265                 }
11266                 ret = 0;
11267             }
11268             break;
11269 
11270           /* case SSI_NVPAIRS:
11271              -- Used with SSIN_UACPROC to enable unaligned accesses.
11272              case SSI_IEEE_STATE_AT_SIGNAL:
11273              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11274              -- Not implemented in linux kernel
11275           */
11276         }
11277         return ret;
11278 #endif
11279 #ifdef TARGET_NR_osf_sigprocmask
11280     /* Alpha specific.  */
11281     case TARGET_NR_osf_sigprocmask:
11282         {
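                  /*
                   * The OSF/1 variant passes the mask by value in arg2 and
                   * returns the old mask as the syscall result rather than
                   * through memory.
                   */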
11283             abi_ulong mask;
11284             int how;
11285             sigset_t set, oldset;
11286 
11287             switch (arg1) {
11288             case TARGET_SIG_BLOCK:
11289                 how = SIG_BLOCK;
11290                 break;
11291             case TARGET_SIG_UNBLOCK:
11292                 how = SIG_UNBLOCK;
11293                 break;
11294             case TARGET_SIG_SETMASK:
11295                 how = SIG_SETMASK;
11296                 break;
11297             default:
11298                 return -TARGET_EINVAL;
11299             }
11300             mask = arg2;
11301             target_to_host_old_sigset(&set, &mask);
11302             ret = do_sigprocmask(how, &set, &oldset);
11303             if (!ret) {
11304                 host_to_target_old_sigset(&mask, &oldset);
11305                 ret = mask;
11306             }
11307         }
11308         return ret;
11309 #endif
11310 
11311 #ifdef TARGET_NR_getgid32
11312     case TARGET_NR_getgid32:
11313         return get_errno(getgid());
11314 #endif
11315 #ifdef TARGET_NR_geteuid32
11316     case TARGET_NR_geteuid32:
11317         return get_errno(geteuid());
11318 #endif
11319 #ifdef TARGET_NR_getegid32
11320     case TARGET_NR_getegid32:
11321         return get_errno(getegid());
11322 #endif
11323 #ifdef TARGET_NR_setreuid32
11324     case TARGET_NR_setreuid32:
11325         return get_errno(setreuid(arg1, arg2));
11326 #endif
11327 #ifdef TARGET_NR_setregid32
11328     case TARGET_NR_setregid32:
11329         return get_errno(setregid(arg1, arg2));
11330 #endif
11331 #ifdef TARGET_NR_getgroups32
11332     case TARGET_NR_getgroups32:
11333         {
11334             int gidsetsize = arg1;
11335             uint32_t *target_grouplist;
11336             gid_t *grouplist;
11337             int i;
11338 
11339             grouplist = alloca(gidsetsize * sizeof(gid_t));
11340             ret = get_errno(getgroups(gidsetsize, grouplist));
11341             if (gidsetsize == 0)
11342                 return ret;
11343             if (!is_error(ret)) {
11344                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11345                 if (!target_grouplist) {
11346                     return -TARGET_EFAULT;
11347                 }
11348                 for (i = 0; i < ret; i++)
11349                     target_grouplist[i] = tswap32(grouplist[i]);
11350                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11351             }
11352         }
11353         return ret;
11354 #endif
11355 #ifdef TARGET_NR_setgroups32
11356     case TARGET_NR_setgroups32:
11357         {
11358             int gidsetsize = arg1;
11359             uint32_t *target_grouplist;
11360             gid_t *grouplist;
11361             int i;
11362 
11363             grouplist = alloca(gidsetsize * sizeof(gid_t));
11364             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11365             if (!target_grouplist) {
11366                 return -TARGET_EFAULT;
11367             }
11368             for (i = 0; i < gidsetsize; i++)
11369                 grouplist[i] = tswap32(target_grouplist[i]);
11370             unlock_user(target_grouplist, arg2, 0);
11371             return get_errno(setgroups(gidsetsize, grouplist));
11372         }
11373 #endif
11374 #ifdef TARGET_NR_fchown32
11375     case TARGET_NR_fchown32:
11376         return get_errno(fchown(arg1, arg2, arg3));
11377 #endif
11378 #ifdef TARGET_NR_setresuid32
11379     case TARGET_NR_setresuid32:
11380         return get_errno(sys_setresuid(arg1, arg2, arg3));
11381 #endif
11382 #ifdef TARGET_NR_getresuid32
11383     case TARGET_NR_getresuid32:
11384         {
11385             uid_t ruid, euid, suid;
11386             ret = get_errno(getresuid(&ruid, &euid, &suid));
11387             if (!is_error(ret)) {
11388                 if (put_user_u32(ruid, arg1)
11389                     || put_user_u32(euid, arg2)
11390                     || put_user_u32(suid, arg3))
11391                     return -TARGET_EFAULT;
11392             }
11393         }
11394         return ret;
11395 #endif
11396 #ifdef TARGET_NR_setresgid32
11397     case TARGET_NR_setresgid32:
11398         return get_errno(sys_setresgid(arg1, arg2, arg3));
11399 #endif
11400 #ifdef TARGET_NR_getresgid32
11401     case TARGET_NR_getresgid32:
11402         {
11403             gid_t rgid, egid, sgid;
11404             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11405             if (!is_error(ret)) {
11406                 if (put_user_u32(rgid, arg1)
11407                     || put_user_u32(egid, arg2)
11408                     || put_user_u32(sgid, arg3))
11409                     return -TARGET_EFAULT;
11410             }
11411         }
11412         return ret;
11413 #endif
11414 #ifdef TARGET_NR_chown32
11415     case TARGET_NR_chown32:
11416         if (!(p = lock_user_string(arg1)))
11417             return -TARGET_EFAULT;
11418         ret = get_errno(chown(p, arg2, arg3));
11419         unlock_user(p, arg1, 0);
11420         return ret;
11421 #endif
11422 #ifdef TARGET_NR_setuid32
11423     case TARGET_NR_setuid32:
11424         return get_errno(sys_setuid(arg1));
11425 #endif
11426 #ifdef TARGET_NR_setgid32
11427     case TARGET_NR_setgid32:
11428         return get_errno(sys_setgid(arg1));
11429 #endif
11430 #ifdef TARGET_NR_setfsuid32
11431     case TARGET_NR_setfsuid32:
11432         return get_errno(setfsuid(arg1));
11433 #endif
11434 #ifdef TARGET_NR_setfsgid32
11435     case TARGET_NR_setfsgid32:
11436         return get_errno(setfsgid(arg1));
11437 #endif
11438 #ifdef TARGET_NR_mincore
11439     case TARGET_NR_mincore:
11440         {
11441             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11442             if (!a) {
11443                 return -TARGET_ENOMEM;
11444             }
11445             p = lock_user(VERIFY_WRITE, arg3,
                                DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE), 0);
11446             if (!p) {
11447                 ret = -TARGET_EFAULT;
11448             } else {
11449                 ret = get_errno(mincore(a, arg2, p));
11450                 unlock_user(p, arg3, DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE));
11451             }
11451             }
11452             unlock_user(a, arg1, 0);
11453         }
11454         return ret;
11455 #endif
11456 #ifdef TARGET_NR_arm_fadvise64_64
11457     case TARGET_NR_arm_fadvise64_64:
11458         /* arm_fadvise64_64 looks like fadvise64_64 but
11459          * with different argument order: fd, advice, offset, len
11460          * rather than the usual fd, offset, len, advice.
11461          * Note that offset and len are both 64-bit so appear as
11462          * pairs of 32-bit registers.
11463          */
11464         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11465                             target_offset64(arg5, arg6), arg2);
11466         return -host_to_target_errno(ret);
11467 #endif
11468 
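          /*
           * fadvise64/fadvise64_64: on 32-bit ABIs the 64-bit offset and length
           * arrive split across register pairs whose position (and, on some
           * targets, argument order) differs, so shuffle the arguments into the
           * usual fd, offset, len, advice order before calling posix_fadvise().
           */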
11469 #if TARGET_ABI_BITS == 32
11470 
11471 #ifdef TARGET_NR_fadvise64_64
11472     case TARGET_NR_fadvise64_64:
11473 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11474         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11475         ret = arg2;
11476         arg2 = arg3;
11477         arg3 = arg4;
11478         arg4 = arg5;
11479         arg5 = arg6;
11480         arg6 = ret;
11481 #else
11482         /* 6 args: fd, offset (high, low), len (high, low), advice */
11483         if (regpairs_aligned(cpu_env, num)) {
11484             /* offset is in (3,4), len in (5,6) and advice in 7 */
11485             arg2 = arg3;
11486             arg3 = arg4;
11487             arg4 = arg5;
11488             arg5 = arg6;
11489             arg6 = arg7;
11490         }
11491 #endif
11492         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11493                             target_offset64(arg4, arg5), arg6);
11494         return -host_to_target_errno(ret);
11495 #endif
11496 
11497 #ifdef TARGET_NR_fadvise64
11498     case TARGET_NR_fadvise64:
11499         /* 5 args: fd, offset (high, low), len, advice */
11500         if (regpairs_aligned(cpu_env, num)) {
11501             /* offset is in (3,4), len in 5 and advice in 6 */
11502             arg2 = arg3;
11503             arg3 = arg4;
11504             arg4 = arg5;
11505             arg5 = arg6;
11506         }
11507         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11508         return -host_to_target_errno(ret);
11509 #endif
11510 
11511 #else /* not a 32-bit ABI */
11512 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11513 #ifdef TARGET_NR_fadvise64_64
11514     case TARGET_NR_fadvise64_64:
11515 #endif
11516 #ifdef TARGET_NR_fadvise64
11517     case TARGET_NR_fadvise64:
11518 #endif
11519 #ifdef TARGET_S390X
11520         switch (arg4) {
11521         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11522         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11523         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11524         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11525         default: break;
11526         }
11527 #endif
11528         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11529 #endif
11530 #endif /* end of 64-bit ABI fadvise handling */
11531 
11532 #ifdef TARGET_NR_madvise
11533     case TARGET_NR_madvise:
11534         /* A straight passthrough may not be safe because qemu sometimes
11535            turns private file-backed mappings into anonymous mappings.
11536            This will break MADV_DONTNEED.
11537            This is a hint, so ignoring and returning success is ok.  */
11538         return 0;
11539 #endif
11540 #ifdef TARGET_NR_fcntl64
11541     case TARGET_NR_fcntl64:
11542     {
11543         int cmd;
11544         struct flock64 fl;
11545         from_flock64_fn *copyfrom = copy_from_user_flock64;
11546         to_flock64_fn *copyto = copy_to_user_flock64;
11547 
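              /*
               * The old ARM OABI packs struct flock64 without the 64-bit
               * alignment padding that EABI inserts, so it needs its own
               * copy helpers.
               */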
11548 #ifdef TARGET_ARM
11549         if (!((CPUARMState *)cpu_env)->eabi) {
11550             copyfrom = copy_from_user_oabi_flock64;
11551             copyto = copy_to_user_oabi_flock64;
11552         }
11553 #endif
11554 
11555         cmd = target_to_host_fcntl_cmd(arg2);
11556         if (cmd == -TARGET_EINVAL) {
11557             return cmd;
11558         }
11559 
11560         switch (arg2) {
11561         case TARGET_F_GETLK64:
11562             ret = copyfrom(&fl, arg3);
11563             if (ret) {
11564                 break;
11565             }
11566             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11567             if (ret == 0) {
11568                 ret = copyto(arg3, &fl);
11569             }
11570             break;
11571 
11572         case TARGET_F_SETLK64:
11573         case TARGET_F_SETLKW64:
11574             ret = copyfrom(&fl, arg3);
11575             if (ret) {
11576                 break;
11577             }
11578             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11579             break;
11580         default:
11581             ret = do_fcntl(arg1, arg2, arg3);
11582             break;
11583         }
11584         return ret;
11585     }
11586 #endif
11587 #ifdef TARGET_NR_cacheflush
11588     case TARGET_NR_cacheflush:
11589         /* self-modifying code is handled automatically, so nothing needed */
11590         return 0;
11591 #endif
11592 #ifdef TARGET_NR_getpagesize
11593     case TARGET_NR_getpagesize:
11594         return TARGET_PAGE_SIZE;
11595 #endif
11596     case TARGET_NR_gettid:
11597         return get_errno(sys_gettid());
11598 #ifdef TARGET_NR_readahead
11599     case TARGET_NR_readahead:
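              /*
               * On 32-bit ABIs the 64-bit offset arrives as a register pair,
               * shifted by one slot when the pair must be aligned.
               */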
11600 #if TARGET_ABI_BITS == 32
11601         if (regpairs_aligned(cpu_env, num)) {
11602             arg2 = arg3;
11603             arg3 = arg4;
11604             arg4 = arg5;
11605         }
11606         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11607 #else
11608         ret = get_errno(readahead(arg1, arg2, arg3));
11609 #endif
11610         return ret;
11611 #endif
11612 #ifdef CONFIG_ATTR
11613 #ifdef TARGET_NR_setxattr
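          /*
           * Extended attribute syscalls: the attribute name is a guest string
           * and the value buffer (when present) is locked for reading (set) or
           * writing (get/list) before forwarding to the host call.
           */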
11614     case TARGET_NR_listxattr:
11615     case TARGET_NR_llistxattr:
11616     {
11617         void *p, *b = 0;
11618         if (arg2) {
11619             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11620             if (!b) {
11621                 return -TARGET_EFAULT;
11622             }
11623         }
11624         p = lock_user_string(arg1);
11625         if (p) {
11626             if (num == TARGET_NR_listxattr) {
11627                 ret = get_errno(listxattr(p, b, arg3));
11628             } else {
11629                 ret = get_errno(llistxattr(p, b, arg3));
11630             }
11631         } else {
11632             ret = -TARGET_EFAULT;
11633         }
11634         unlock_user(p, arg1, 0);
11635         unlock_user(b, arg2, arg3);
11636         return ret;
11637     }
11638     case TARGET_NR_flistxattr:
11639     {
11640         void *b = 0;
11641         if (arg2) {
11642             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11643             if (!b) {
11644                 return -TARGET_EFAULT;
11645             }
11646         }
11647         ret = get_errno(flistxattr(arg1, b, arg3));
11648         unlock_user(b, arg2, arg3);
11649         return ret;
11650     }
11651     case TARGET_NR_setxattr:
11652     case TARGET_NR_lsetxattr:
11653         {
11654             void *p, *n, *v = 0;
11655             if (arg3) {
11656                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11657                 if (!v) {
11658                     return -TARGET_EFAULT;
11659                 }
11660             }
11661             p = lock_user_string(arg1);
11662             n = lock_user_string(arg2);
11663             if (p && n) {
11664                 if (num == TARGET_NR_setxattr) {
11665                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11666                 } else {
11667                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11668                 }
11669             } else {
11670                 ret = -TARGET_EFAULT;
11671             }
11672             unlock_user(p, arg1, 0);
11673             unlock_user(n, arg2, 0);
11674             unlock_user(v, arg3, 0);
11675         }
11676         return ret;
11677     case TARGET_NR_fsetxattr:
11678         {
11679             void *n, *v = 0;
11680             if (arg3) {
11681                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11682                 if (!v) {
11683                     return -TARGET_EFAULT;
11684                 }
11685             }
11686             n = lock_user_string(arg2);
11687             if (n) {
11688                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11689             } else {
11690                 ret = -TARGET_EFAULT;
11691             }
11692             unlock_user(n, arg2, 0);
11693             unlock_user(v, arg3, 0);
11694         }
11695         return ret;
11696     case TARGET_NR_getxattr:
11697     case TARGET_NR_lgetxattr:
11698         {
11699             void *p, *n, *v = 0;
11700             if (arg3) {
11701                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11702                 if (!v) {
11703                     return -TARGET_EFAULT;
11704                 }
11705             }
11706             p = lock_user_string(arg1);
11707             n = lock_user_string(arg2);
11708             if (p && n) {
11709                 if (num == TARGET_NR_getxattr) {
11710                     ret = get_errno(getxattr(p, n, v, arg4));
11711                 } else {
11712                     ret = get_errno(lgetxattr(p, n, v, arg4));
11713                 }
11714             } else {
11715                 ret = -TARGET_EFAULT;
11716             }
11717             unlock_user(p, arg1, 0);
11718             unlock_user(n, arg2, 0);
11719             unlock_user(v, arg3, arg4);
11720         }
11721         return ret;
11722     case TARGET_NR_fgetxattr:
11723         {
11724             void *n, *v = 0;
11725             if (arg3) {
11726                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11727                 if (!v) {
11728                     return -TARGET_EFAULT;
11729                 }
11730             }
11731             n = lock_user_string(arg2);
11732             if (n) {
11733                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11734             } else {
11735                 ret = -TARGET_EFAULT;
11736             }
11737             unlock_user(n, arg2, 0);
11738             unlock_user(v, arg3, arg4);
11739         }
11740         return ret;
11741     case TARGET_NR_removexattr:
11742     case TARGET_NR_lremovexattr:
11743         {
11744             void *p, *n;
11745             p = lock_user_string(arg1);
11746             n = lock_user_string(arg2);
11747             if (p && n) {
11748                 if (num == TARGET_NR_removexattr) {
11749                     ret = get_errno(removexattr(p, n));
11750                 } else {
11751                     ret = get_errno(lremovexattr(p, n));
11752                 }
11753             } else {
11754                 ret = -TARGET_EFAULT;
11755             }
11756             unlock_user(p, arg1, 0);
11757             unlock_user(n, arg2, 0);
11758         }
11759         return ret;
11760     case TARGET_NR_fremovexattr:
11761         {
11762             void *n;
11763             n = lock_user_string(arg2);
11764             if (n) {
11765                 ret = get_errno(fremovexattr(arg1, n));
11766             } else {
11767                 ret = -TARGET_EFAULT;
11768             }
11769             unlock_user(n, arg2, 0);
11770         }
11771         return ret;
11772 #endif
11773 #endif /* CONFIG_ATTR */
11774 #ifdef TARGET_NR_set_thread_area
11775     case TARGET_NR_set_thread_area:
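            /*
             * Each target keeps its TLS pointer somewhere different: MIPS in
             * CP0 UserLocal, CRIS in the PID special register (low byte must
             * be zero), 32-bit x86 via a TLS/GDT entry, m68k in the TaskState.
             */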
11776 #if defined(TARGET_MIPS)
11777       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11778       return 0;
11779 #elif defined(TARGET_CRIS)
11780       if (arg1 & 0xff)
11781           ret = -TARGET_EINVAL;
11782       else {
11783           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11784           ret = 0;
11785       }
11786       return ret;
11787 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11788       return do_set_thread_area(cpu_env, arg1);
11789 #elif defined(TARGET_M68K)
11790       {
11791           TaskState *ts = cpu->opaque;
11792           ts->tp_value = arg1;
11793           return 0;
11794       }
11795 #else
11796       return -TARGET_ENOSYS;
11797 #endif
11798 #endif
11799 #ifdef TARGET_NR_get_thread_area
11800     case TARGET_NR_get_thread_area:
11801 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11802         return do_get_thread_area(cpu_env, arg1);
11803 #elif defined(TARGET_M68K)
11804         {
11805             TaskState *ts = cpu->opaque;
11806             return ts->tp_value;
11807         }
11808 #else
11809         return -TARGET_ENOSYS;
11810 #endif
11811 #endif
11812 #ifdef TARGET_NR_getdomainname
11813     case TARGET_NR_getdomainname:
11814         return -TARGET_ENOSYS;
11815 #endif
11816 
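          /*
           * clock_* syscalls: convert the target timespec (32-bit or 64-bit
           * layout) to and from the host representation around the host call.
           */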
11817 #ifdef TARGET_NR_clock_settime
11818     case TARGET_NR_clock_settime:
11819     {
11820         struct timespec ts;
11821 
11822         ret = target_to_host_timespec(&ts, arg2);
11823         if (!is_error(ret)) {
11824             ret = get_errno(clock_settime(arg1, &ts));
11825         }
11826         return ret;
11827     }
11828 #endif
11829 #ifdef TARGET_NR_clock_settime64
11830     case TARGET_NR_clock_settime64:
11831     {
11832         struct timespec ts;
11833 
11834         ret = target_to_host_timespec64(&ts, arg2);
11835         if (!is_error(ret)) {
11836             ret = get_errno(clock_settime(arg1, &ts));
11837         }
11838         return ret;
11839     }
11840 #endif
11841 #ifdef TARGET_NR_clock_gettime
11842     case TARGET_NR_clock_gettime:
11843     {
11844         struct timespec ts;
11845         ret = get_errno(clock_gettime(arg1, &ts));
11846         if (!is_error(ret)) {
11847             ret = host_to_target_timespec(arg2, &ts);
11848         }
11849         return ret;
11850     }
11851 #endif
11852 #ifdef TARGET_NR_clock_gettime64
11853     case TARGET_NR_clock_gettime64:
11854     {
11855         struct timespec ts;
11856         ret = get_errno(clock_gettime(arg1, &ts));
11857         if (!is_error(ret)) {
11858             ret = host_to_target_timespec64(arg2, &ts);
11859         }
11860         return ret;
11861     }
11862 #endif
11863 #ifdef TARGET_NR_clock_getres
11864     case TARGET_NR_clock_getres:
11865     {
11866         struct timespec ts;
11867         ret = get_errno(clock_getres(arg1, &ts));
11868         if (!is_error(ret)) {
11869             host_to_target_timespec(arg2, &ts);
11870         }
11871         return ret;
11872     }
11873 #endif
11874 #ifdef TARGET_NR_clock_getres_time64
11875     case TARGET_NR_clock_getres_time64:
11876     {
11877         struct timespec ts;
11878         ret = get_errno(clock_getres(arg1, &ts));
11879         if (!is_error(ret)) {
11880             host_to_target_timespec64(arg2, &ts);
11881         }
11882         return ret;
11883     }
11884 #endif
11885 #ifdef TARGET_NR_clock_nanosleep
11886     case TARGET_NR_clock_nanosleep:
11887     {
11888         struct timespec ts;
11889         if (target_to_host_timespec(&ts, arg3)) {
11890             return -TARGET_EFAULT;
11891         }
11892         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11893                                              &ts, arg4 ? &ts : NULL));
11894         /*
11895          * If the call is interrupted by a signal handler, it fails with
11896          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
11897          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
11898          */
11899         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
11900             host_to_target_timespec(arg4, &ts)) {
11901               return -TARGET_EFAULT;
11902         }
11903 
11904         return ret;
11905     }
11906 #endif
11907 
11908 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
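          /*
           * set_tid_address only records a location for the kernel to clear
           * and futex-wake when the thread exits, so the translated host
           * address can be passed straight through.
           */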
11909     case TARGET_NR_set_tid_address:
11910         return get_errno(set_tid_address((int *)g2h(arg1)));
11911 #endif
11912 
11913     case TARGET_NR_tkill:
11914         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11915 
11916     case TARGET_NR_tgkill:
11917         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11918                          target_to_host_signal(arg3)));
11919 
11920 #ifdef TARGET_NR_set_robust_list
11921     case TARGET_NR_set_robust_list:
11922     case TARGET_NR_get_robust_list:
11923         /* The ABI for supporting robust futexes has userspace pass
11924          * the kernel a pointer to a linked list which is updated by
11925          * userspace after the syscall; the list is walked by the kernel
11926          * when the thread exits. Since the linked list in QEMU guest
11927          * memory isn't a valid linked list for the host and we have
11928          * no way to reliably intercept the thread-death event, we can't
11929          * support these. Silently return ENOSYS so that guest userspace
11930          * falls back to a non-robust futex implementation (which should
11931          * be OK except in the corner case of the guest crashing while
11932          * holding a mutex that is shared with another process via
11933          * shared memory).
11934          */
11935         return -TARGET_ENOSYS;
11936 #endif
11937 
11938 #if defined(TARGET_NR_utimensat)
11939     case TARGET_NR_utimensat:
11940         {
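                  /*
                   * arg3, when non-NULL, points at an array of two target
                   * timespecs (access and modification time); arg2 may be NULL
                   * to operate on the descriptor itself.
                   */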
11941             struct timespec *tsp, ts[2];
11942             if (!arg3) {
11943                 tsp = NULL;
11944             } else {
11945                 if (target_to_host_timespec(ts, arg3)) {
11946                     return -TARGET_EFAULT;
11947                 }
11948                 if (target_to_host_timespec(ts + 1, arg3 +
11949                                             sizeof(struct target_timespec))) {
11950                     return -TARGET_EFAULT;
11951                 }
11952                 tsp = ts;
11953             }
11954             if (!arg2) {
11955                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11956             } else {
11957                 if (!(p = lock_user_string(arg2))) {
11958                     return -TARGET_EFAULT;
11959                 }
11960                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11961                 unlock_user(p, arg2, 0);
11962             }
11963         }
11964         return ret;
11965 #endif
11966 #ifdef TARGET_NR_futex
11967     case TARGET_NR_futex:
11968         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11969 #endif
11970 #ifdef TARGET_NR_futex_time64
11971     case TARGET_NR_futex_time64:
11972         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11973 #endif
11974 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11975     case TARGET_NR_inotify_init:
11976         ret = get_errno(sys_inotify_init());
11977         if (ret >= 0) {
11978             fd_trans_register(ret, &target_inotify_trans);
11979         }
11980         return ret;
11981 #endif
11982 #ifdef CONFIG_INOTIFY1
11983 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11984     case TARGET_NR_inotify_init1:
11985         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11986                                           fcntl_flags_tbl)));
11987         if (ret >= 0) {
11988             fd_trans_register(ret, &target_inotify_trans);
11989         }
11990         return ret;
11991 #endif
11992 #endif
11993 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11994     case TARGET_NR_inotify_add_watch:
11995         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
11996         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11997         unlock_user(p, arg2, 0);
11998         return ret;
11999 #endif
12000 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12001     case TARGET_NR_inotify_rm_watch:
12002         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12003 #endif
12004 
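          /*
           * POSIX message queues: queue names and message buffers are copied
           * from guest memory, and mq_attr structs are converted field by
           * field.
           */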
12005 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12006     case TARGET_NR_mq_open:
12007         {
12008             struct mq_attr posix_mq_attr;
12009             struct mq_attr *pposix_mq_attr;
12010             int host_flags;
12011 
12012             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12013             pposix_mq_attr = NULL;
12014             if (arg4) {
12015                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12016                     return -TARGET_EFAULT;
12017                 }
12018                 pposix_mq_attr = &posix_mq_attr;
12019             }
12020             p = lock_user_string(arg1 - 1);
12021             if (!p) {
12022                 return -TARGET_EFAULT;
12023             }
12024             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12025             unlock_user (p, arg1, 0);
12026         }
12027         return ret;
12028 
12029     case TARGET_NR_mq_unlink:
12030         p = lock_user_string(arg1 - 1);
12031         if (!p) {
12032             return -TARGET_EFAULT;
12033         }
12034         ret = get_errno(mq_unlink(p));
12035         unlock_user(p, arg1, 0);
12036         return ret;
12037 
12038 #ifdef TARGET_NR_mq_timedsend
12039     case TARGET_NR_mq_timedsend:
12040         {
12041             struct timespec ts;
12042 
12043             p = lock_user(VERIFY_READ, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12044             if (arg5 != 0) {
12045                 if (target_to_host_timespec(&ts, arg5)) {
                          unlock_user(p, arg2, 0);
                          return -TARGET_EFAULT;
                      }
12046                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12047                 host_to_target_timespec(arg5, &ts);
12048             } else {
12049                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12050             }
12051             unlock_user(p, arg2, arg3);
12052         }
12053         return ret;
12054 #endif
12055 
12056 #ifdef TARGET_NR_mq_timedreceive
12057     case TARGET_NR_mq_timedreceive:
12058         {
12059             struct timespec ts;
12060             unsigned int prio;
12061 
                  /* The received message is written into the guest buffer,
                     so lock it for writing rather than reading. */
12062             p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
                  if (!p) {
                      return -TARGET_EFAULT;
                  }
12063             if (arg5 != 0) {
12064                 if (target_to_host_timespec(&ts, arg5)) {
                          unlock_user(p, arg2, 0);
                          return -TARGET_EFAULT;
                      }
12065                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12066                                                      &prio, &ts));
12067                 host_to_target_timespec(arg5, &ts);
12068             } else {
12069                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12070                                                      &prio, NULL));
12071             }
12072             unlock_user(p, arg2, arg3);
12073             if (arg4 != 0) {
12074                 put_user_u32(prio, arg4);
                  }
12075         }
12076         return ret;
12077 #endif
12078 
12079     /* Not implemented for now... */
12080 /*     case TARGET_NR_mq_notify: */
12081 /*         break; */
12082 
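          /*
           * mq_getsetattr: a non-NULL new-attr pointer is applied with
           * mq_setattr() (which also reports the previous attributes);
           * otherwise the current attributes are read with mq_getattr().
           * Either way the result is copied back to the guest when an
           * output pointer was supplied.
           */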
12083     case TARGET_NR_mq_getsetattr:
12084         {
12085             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12086             ret = 0;
12087             if (arg2 != 0) {
12088                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12089                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12090                                            &posix_mq_attr_out));
12091             } else if (arg3 != 0) {
12092                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12093             }
12094             if (ret == 0 && arg3 != 0) {
12095                 if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
                          return -TARGET_EFAULT;
                      }
12096             }
12097         }
12098         return ret;
12099 #endif
12100 
12101 #ifdef CONFIG_SPLICE
12102 #ifdef TARGET_NR_tee
12103     case TARGET_NR_tee:
12104         {
12105             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12106         }
12107         return ret;
12108 #endif
12109 #ifdef TARGET_NR_splice
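          /*
           * splice: the optional off_in/off_out values are read from guest
           * memory into host loff_t variables, handed to the host splice(),
           * and written back afterwards so the guest sees the updated
           * offsets.
           */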
12110     case TARGET_NR_splice:
12111         {
12112             loff_t loff_in, loff_out;
12113             loff_t *ploff_in = NULL, *ploff_out = NULL;
12114             if (arg2) {
12115                 if (get_user_u64(loff_in, arg2)) {
12116                     return -TARGET_EFAULT;
12117                 }
12118                 ploff_in = &loff_in;
12119             }
12120             if (arg4) {
12121                 if (get_user_u64(loff_out, arg4)) {
12122                     return -TARGET_EFAULT;
12123                 }
12124                 ploff_out = &loff_out;
12125             }
12126             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12127             if (arg2) {
12128                 if (put_user_u64(loff_in, arg2)) {
12129                     return -TARGET_EFAULT;
12130                 }
12131             }
12132             if (arg4) {
12133                 if (put_user_u64(loff_out, arg4)) {
12134                     return -TARGET_EFAULT;
12135                 }
12136             }
12137         }
12138         return ret;
12139 #endif
12140 #ifdef TARGET_NR_vmsplice
12141     case TARGET_NR_vmsplice:
12142         {
12143             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12144             if (vec != NULL) {
12145                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12146                 unlock_iovec(vec, arg2, arg3, 0);
12147             } else {
12148                 ret = -host_to_target_errno(errno);
12149             }
12150         }
12151         return ret;
12152 #endif
12153 #endif /* CONFIG_SPLICE */
12154 #ifdef CONFIG_EVENTFD
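          /*
           * eventfd: the new descriptor is registered with an fd translator
           * so that the 8-byte counter values read from or written to it
           * can be byte-swapped for the guest. For eventfd2, the O_NONBLOCK
           * and O_CLOEXEC bits (whose values differ on some targets) are
           * translated by hand; other flag bits pass through unchanged.
           */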
12155 #if defined(TARGET_NR_eventfd)
12156     case TARGET_NR_eventfd:
12157         ret = get_errno(eventfd(arg1, 0));
12158         if (ret >= 0) {
12159             fd_trans_register(ret, &target_eventfd_trans);
12160         }
12161         return ret;
12162 #endif
12163 #if defined(TARGET_NR_eventfd2)
12164     case TARGET_NR_eventfd2:
12165     {
12166         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12167         if (arg2 & TARGET_O_NONBLOCK) {
12168             host_flags |= O_NONBLOCK;
12169         }
12170         if (arg2 & TARGET_O_CLOEXEC) {
12171             host_flags |= O_CLOEXEC;
12172         }
12173         ret = get_errno(eventfd(arg1, host_flags));
12174         if (ret >= 0) {
12175             fd_trans_register(ret, &target_eventfd_trans);
12176         }
12177         return ret;
12178     }
12179 #endif
12180 #endif /* CONFIG_EVENTFD  */
12181 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
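          /*
           * fallocate: on 32-bit ABIs the 64-bit offset and length each
           * arrive split across two syscall arguments and are reassembled
           * with target_offset64(); 64-bit ABIs pass them directly.
           */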
12182     case TARGET_NR_fallocate:
12183 #if TARGET_ABI_BITS == 32
12184         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12185                                   target_offset64(arg5, arg6)));
12186 #else
12187         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12188 #endif
12189         return ret;
12190 #endif
12191 #if defined(CONFIG_SYNC_FILE_RANGE)
12192 #if defined(TARGET_NR_sync_file_range)
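          /*
           * sync_file_range: as with fallocate, 32-bit ABIs split the 64-bit
           * offset and length across argument pairs. MIPS aligns 64-bit
           * arguments to even register pairs, which is presumably why its
           * argument numbering is shifted by one here.
           */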
12193     case TARGET_NR_sync_file_range:
12194 #if TARGET_ABI_BITS == 32
12195 #if defined(TARGET_MIPS)
12196         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12197                                         target_offset64(arg5, arg6), arg7));
12198 #else
12199         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12200                                         target_offset64(arg4, arg5), arg6));
12201 #endif /* !TARGET_MIPS */
12202 #else
12203         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12204 #endif
12205         return ret;
12206 #endif
12207 #if defined(TARGET_NR_sync_file_range2) || \
12208     defined(TARGET_NR_arm_sync_file_range)
12209 #if defined(TARGET_NR_sync_file_range2)
12210     case TARGET_NR_sync_file_range2:
12211 #endif
12212 #if defined(TARGET_NR_arm_sync_file_range)
12213     case TARGET_NR_arm_sync_file_range:
12214 #endif
12215         /* This is like sync_file_range but the arguments are reordered */
12216 #if TARGET_ABI_BITS == 32
12217         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12218                                         target_offset64(arg5, arg6), arg2));
12219 #else
12220         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12221 #endif
12222         return ret;
12223 #endif
12224 #endif
12225 #if defined(TARGET_NR_signalfd4)
12226     case TARGET_NR_signalfd4:
12227         return do_signalfd4(arg1, arg2, arg4);
12228 #endif
12229 #if defined(TARGET_NR_signalfd)
12230     case TARGET_NR_signalfd:
12231         return do_signalfd4(arg1, arg2, 0);
12232 #endif
12233 #if defined(CONFIG_EPOLL)
12234 #if defined(TARGET_NR_epoll_create)
12235     case TARGET_NR_epoll_create:
12236         return get_errno(epoll_create(arg1));
12237 #endif
12238 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12239     case TARGET_NR_epoll_create1:
12240         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12241 #endif
12242 #if defined(TARGET_NR_epoll_ctl)
12243     case TARGET_NR_epoll_ctl:
12244     {
12245         struct epoll_event ep;
12246         struct epoll_event *epp = 0;
12247         if (arg4) {
12248             struct target_epoll_event *target_ep;
12249             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12250                 return -TARGET_EFAULT;
12251             }
12252             ep.events = tswap32(target_ep->events);
12253             /* The epoll_data_t union is just opaque data to the kernel,
12254              * so we transfer all 64 bits across and need not worry what
12255              * actual data type it is.
12256              */
12257             ep.data.u64 = tswap64(target_ep->data.u64);
12258             unlock_user_struct(target_ep, arg4, 0);
12259             epp = &ep;
12260         }
12261         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12262     }
12263 #endif
12264 
12265 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
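          /*
           * epoll_wait and epoll_pwait share one implementation: the result
           * array is bounce-buffered in host format, both variants go
           * through safe_epoll_pwait() (plain epoll_wait passes a NULL
           * sigmask), and the returned events are byte-swapped back into
           * the guest's buffer.
           */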
12266 #if defined(TARGET_NR_epoll_wait)
12267     case TARGET_NR_epoll_wait:
12268 #endif
12269 #if defined(TARGET_NR_epoll_pwait)
12270     case TARGET_NR_epoll_pwait:
12271 #endif
12272     {
12273         struct target_epoll_event *target_ep;
12274         struct epoll_event *ep;
12275         int epfd = arg1;
12276         int maxevents = arg3;
12277         int timeout = arg4;
12278 
12279         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12280             return -TARGET_EINVAL;
12281         }
12282 
12283         target_ep = lock_user(VERIFY_WRITE, arg2,
12284                               maxevents * sizeof(struct target_epoll_event), 1);
12285         if (!target_ep) {
12286             return -TARGET_EFAULT;
12287         }
12288 
12289         ep = g_try_new(struct epoll_event, maxevents);
12290         if (!ep) {
12291             unlock_user(target_ep, arg2, 0);
12292             return -TARGET_ENOMEM;
12293         }
12294 
12295         switch (num) {
12296 #if defined(TARGET_NR_epoll_pwait)
12297         case TARGET_NR_epoll_pwait:
12298         {
12299             target_sigset_t *target_set;
12300             sigset_t _set, *set = &_set;
12301 
12302             if (arg5) {
12303                 if (arg6 != sizeof(target_sigset_t)) {
12304                     ret = -TARGET_EINVAL;
12305                     break;
12306                 }
12307 
12308                 target_set = lock_user(VERIFY_READ, arg5,
12309                                        sizeof(target_sigset_t), 1);
12310                 if (!target_set) {
12311                     ret = -TARGET_EFAULT;
12312                     break;
12313                 }
12314                 target_to_host_sigset(set, target_set);
12315                 unlock_user(target_set, arg5, 0);
12316             } else {
12317                 set = NULL;
12318             }
12319 
12320             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12321                                              set, SIGSET_T_SIZE));
12322             break;
12323         }
12324 #endif
12325 #if defined(TARGET_NR_epoll_wait)
12326         case TARGET_NR_epoll_wait:
12327             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12328                                              NULL, 0));
12329             break;
12330 #endif
12331         default:
12332             ret = -TARGET_ENOSYS;
12333         }
12334         if (!is_error(ret)) {
12335             int i;
12336             for (i = 0; i < ret; i++) {
12337                 target_ep[i].events = tswap32(ep[i].events);
12338                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12339             }
12340             unlock_user(target_ep, arg2,
12341                         ret * sizeof(struct target_epoll_event));
12342         } else {
12343             unlock_user(target_ep, arg2, 0);
12344         }
12345         g_free(ep);
12346         return ret;
12347     }
12348 #endif
12349 #endif
12350 #ifdef TARGET_NR_prlimit64
12351     case TARGET_NR_prlimit64:
12352     {
12353         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12354         struct target_rlimit64 *target_rnew, *target_rold;
12355         struct host_rlimit64 rnew, rold, *rnewp = 0;
12356         int resource = target_to_host_resource(arg2);
12357 
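              /*
               * As with setrlimit, new memory limits are not forwarded to
               * the host: they would also constrain QEMU's own allocations.
               * Presumably for that reason RLIMIT_AS, RLIMIT_DATA and
               * RLIMIT_STACK requests only query and return the old limit.
               */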
12358         if (arg3 && (resource != RLIMIT_AS &&
12359                      resource != RLIMIT_DATA &&
12360                      resource != RLIMIT_STACK)) {
12361             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12362                 return -TARGET_EFAULT;
12363             }
12364             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12365             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12366             unlock_user_struct(target_rnew, arg3, 0);
12367             rnewp = &rnew;
12368         }
12369 
12370         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12371         if (!is_error(ret) && arg4) {
12372             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12373                 return -TARGET_EFAULT;
12374             }
12375             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12376             target_rold->rlim_max = tswap64(rold.rlim_max);
12377             unlock_user_struct(target_rold, arg4, 1);
12378         }
12379         return ret;
12380     }
12381 #endif
12382 #ifdef TARGET_NR_gethostname
12383     case TARGET_NR_gethostname:
12384     {
12385         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12386         if (name) {
12387             ret = get_errno(gethostname(name, arg2));
12388             unlock_user(name, arg1, arg2);
12389         } else {
12390             ret = -TARGET_EFAULT;
12391         }
12392         return ret;
12393     }
12394 #endif
12395 #ifdef TARGET_NR_atomic_cmpxchg_32
12396     case TARGET_NR_atomic_cmpxchg_32:
12397     {
12398         /* should use start_exclusive from main.c */
12399         abi_ulong mem_value;
12400         if (get_user_u32(mem_value, arg6)) {
12401             target_siginfo_t info;
12402             info.si_signo = SIGSEGV;
12403             info.si_errno = 0;
12404             info.si_code = TARGET_SEGV_MAPERR;
12405             info._sifields._sigfault._addr = arg6;
12406             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12407                          QEMU_SI_FAULT, &info);
                  /*
                   * Don't fall through and use the uninitialized mem_value;
                   * the queued SIGSEGV will be delivered to the guest.
                   */
12408             return 0xdeadbeef;
12410         }
12411         if (mem_value == arg2) {
12412             put_user_u32(arg1, arg6);
              }
12413         return mem_value;
12414     }
12415 #endif
12416 #ifdef TARGET_NR_atomic_barrier
12417     case TARGET_NR_atomic_barrier:
12418         /* Like the kernel implementation and the QEMU ARM barrier,
12419            treat this as a no-op. */
12420         return 0;
12421 #endif
12422 
12423 #ifdef TARGET_NR_timer_create
12424     case TARGET_NR_timer_create:
12425     {
12426         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12427 
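              /*
               * Host timers live in the g_posix_timers table; the id handed
               * back to the guest is the table index tagged with TIMER_MAGIC,
               * which the other timer_* syscalls decode via get_timer_id().
               */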
12428         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12429 
12430         int clkid = arg1;
12431         int timer_index = next_free_host_timer();
12432 
12433         if (timer_index < 0) {
12434             ret = -TARGET_EAGAIN;
12435         } else {
12436             timer_t *phtimer = g_posix_timers + timer_index;
12437 
12438             if (arg2) {
12439                 phost_sevp = &host_sevp;
12440                 ret = target_to_host_sigevent(phost_sevp, arg2);
12441                 if (ret != 0) {
12442                     return ret;
12443                 }
12444             }
12445 
12446             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12447             if (ret) {
12448                 phtimer = NULL;
12449             } else {
12450                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12451                     return -TARGET_EFAULT;
12452                 }
12453             }
12454         }
12455         return ret;
12456     }
12457 #endif
12458 
12459 #ifdef TARGET_NR_timer_settime
12460     case TARGET_NR_timer_settime:
12461     {
12462         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12463          * struct itimerspec * old_value */
12464         target_timer_t timerid = get_timer_id(arg1);
12465 
12466         if (timerid < 0) {
12467             ret = timerid;
12468         } else if (arg3 == 0) {
12469             ret = -TARGET_EINVAL;
12470         } else {
12471             timer_t htimer = g_posix_timers[timerid];
12472             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12473 
12474             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12475                 return -TARGET_EFAULT;
12476             }
12477             ret = get_errno(
12478                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12479             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12480                 return -TARGET_EFAULT;
12481             }
12482         }
12483         return ret;
12484     }
12485 #endif
12486 
12487 #ifdef TARGET_NR_timer_settime64
12488     case TARGET_NR_timer_settime64:
12489     {
12490         target_timer_t timerid = get_timer_id(arg1);
12491 
12492         if (timerid < 0) {
12493             ret = timerid;
12494         } else if (arg3 == 0) {
12495             ret = -TARGET_EINVAL;
12496         } else {
12497             timer_t htimer = g_posix_timers[timerid];
12498             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12499 
12500             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12501                 return -TARGET_EFAULT;
12502             }
12503             ret = get_errno(
12504                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12505             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12506                 return -TARGET_EFAULT;
12507             }
12508         }
12509         return ret;
12510     }
12511 #endif
12512 
12513 #ifdef TARGET_NR_timer_gettime
12514     case TARGET_NR_timer_gettime:
12515     {
12516         /* args: timer_t timerid, struct itimerspec *curr_value */
12517         target_timer_t timerid = get_timer_id(arg1);
12518 
12519         if (timerid < 0) {
12520             ret = timerid;
12521         } else if (!arg2) {
12522             ret = -TARGET_EFAULT;
12523         } else {
12524             timer_t htimer = g_posix_timers[timerid];
12525             struct itimerspec hspec;
12526             ret = get_errno(timer_gettime(htimer, &hspec));
12527 
12528             if (host_to_target_itimerspec(arg2, &hspec)) {
12529                 ret = -TARGET_EFAULT;
12530             }
12531         }
12532         return ret;
12533     }
12534 #endif
12535 
12536 #ifdef TARGET_NR_timer_gettime64
12537     case TARGET_NR_timer_gettime64:
12538     {
12539         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12540         target_timer_t timerid = get_timer_id(arg1);
12541 
12542         if (timerid < 0) {
12543             ret = timerid;
12544         } else if (!arg2) {
12545             ret = -TARGET_EFAULT;
12546         } else {
12547             timer_t htimer = g_posix_timers[timerid];
12548             struct itimerspec hspec;
12549             ret = get_errno(timer_gettime(htimer, &hspec));
12550 
12551             if (host_to_target_itimerspec64(arg2, &hspec)) {
12552                 ret = -TARGET_EFAULT;
12553             }
12554         }
12555         return ret;
12556     }
12557 #endif
12558 
12559 #ifdef TARGET_NR_timer_getoverrun
12560     case TARGET_NR_timer_getoverrun:
12561     {
12562         /* args: timer_t timerid */
12563         target_timer_t timerid = get_timer_id(arg1);
12564 
12565         if (timerid < 0) {
12566             ret = timerid;
12567         } else {
12568             timer_t htimer = g_posix_timers[timerid];
12569             ret = get_errno(timer_getoverrun(htimer));
12570         }
12571         return ret;
12572     }
12573 #endif
12574 
12575 #ifdef TARGET_NR_timer_delete
12576     case TARGET_NR_timer_delete:
12577     {
12578         /* args: timer_t timerid */
12579         target_timer_t timerid = get_timer_id(arg1);
12580 
12581         if (timerid < 0) {
12582             ret = timerid;
12583         } else {
12584             timer_t htimer = g_posix_timers[timerid];
12585             ret = get_errno(timer_delete(htimer));
12586             g_posix_timers[timerid] = 0;
12587         }
12588         return ret;
12589     }
12590 #endif
12591 
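          /*
           * timerfd: creation flags go through fcntl_flags_tbl, and the
           * itimerspec values are converted between guest and host layouts;
           * the *64 variants use the 64-bit time_t layout for time64 guests.
           */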
12592 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12593     case TARGET_NR_timerfd_create:
12594         return get_errno(timerfd_create(arg1,
12595                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12596 #endif
12597 
12598 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12599     case TARGET_NR_timerfd_gettime:
12600         {
12601             struct itimerspec its_curr;
12602 
12603             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12604 
12605             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12606                 return -TARGET_EFAULT;
12607             }
12608         }
12609         return ret;
12610 #endif
12611 
12612 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12613     case TARGET_NR_timerfd_gettime64:
12614         {
12615             struct itimerspec its_curr;
12616 
12617             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12618 
12619             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12620                 return -TARGET_EFAULT;
12621             }
12622         }
12623         return ret;
12624 #endif
12625 
12626 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12627     case TARGET_NR_timerfd_settime:
12628         {
12629             struct itimerspec its_new, its_old, *p_new;
12630 
12631             if (arg3) {
12632                 if (target_to_host_itimerspec(&its_new, arg3)) {
12633                     return -TARGET_EFAULT;
12634                 }
12635                 p_new = &its_new;
12636             } else {
12637                 p_new = NULL;
12638             }
12639 
12640             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12641 
12642             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12643                 return -TARGET_EFAULT;
12644             }
12645         }
12646         return ret;
12647 #endif
12648 
12649 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12650     case TARGET_NR_timerfd_settime64:
12651         {
12652             struct itimerspec its_new, its_old, *p_new;
12653 
12654             if (arg3) {
12655                 if (target_to_host_itimerspec64(&its_new, arg3)) {
12656                     return -TARGET_EFAULT;
12657                 }
12658                 p_new = &its_new;
12659             } else {
12660                 p_new = NULL;
12661             }
12662 
12663             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12664 
12665             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12666                 return -TARGET_EFAULT;
12667             }
12668         }
12669         return ret;
12670 #endif
12671 
12672 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12673     case TARGET_NR_ioprio_get:
12674         return get_errno(ioprio_get(arg1, arg2));
12675 #endif
12676 
12677 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12678     case TARGET_NR_ioprio_set:
12679         return get_errno(ioprio_set(arg1, arg2, arg3));
12680 #endif
12681 
12682 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12683     case TARGET_NR_setns:
12684         return get_errno(setns(arg1, arg2));
12685 #endif
12686 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12687     case TARGET_NR_unshare:
12688         return get_errno(unshare(arg1));
12689 #endif
12690 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12691     case TARGET_NR_kcmp:
12692         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12693 #endif
12694 #ifdef TARGET_NR_swapcontext
12695     case TARGET_NR_swapcontext:
12696         /* PowerPC specific.  */
12697         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12698 #endif
12699 #ifdef TARGET_NR_memfd_create
12700     case TARGET_NR_memfd_create:
12701         p = lock_user_string(arg1);
12702         if (!p) {
12703             return -TARGET_EFAULT;
12704         }
12705         ret = get_errno(memfd_create(p, arg2));
12706         fd_trans_unregister(ret);
12707         unlock_user(p, arg1, 0);
12708         return ret;
12709 #endif
12710 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12711     case TARGET_NR_membarrier:
12712         return get_errno(membarrier(arg1, arg2));
12713 #endif
12714 
12715     default:
12716         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12717         return -TARGET_ENOSYS;
12718     }
12719     return ret;
12720 }
12721 
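      /*
       * Wrapper around do_syscall1(): notes the syscall via
       * record_syscall_start()/record_syscall_return(), prints strace-style
       * logging when LOG_STRACE is enabled, and (with DEBUG_ERESTARTSYS)
       * can force a restart of every syscall to exercise the restart paths.
       */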
12722 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12723                     abi_long arg2, abi_long arg3, abi_long arg4,
12724                     abi_long arg5, abi_long arg6, abi_long arg7,
12725                     abi_long arg8)
12726 {
12727     CPUState *cpu = env_cpu(cpu_env);
12728     abi_long ret;
12729 
12730 #ifdef DEBUG_ERESTARTSYS
12731     /* Debug-only code for exercising the syscall-restart code paths
12732      * in the per-architecture cpu main loops: restart every syscall
12733      * the guest makes once before letting it through.
12734      */
12735     {
12736         static bool flag;
12737         flag = !flag;
12738         if (flag) {
12739             return -TARGET_ERESTARTSYS;
12740         }
12741     }
12742 #endif
12743 
12744     record_syscall_start(cpu, num, arg1,
12745                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12746 
12747     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12748         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
12749     }
12750 
12751     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12752                       arg5, arg6, arg7, arg8);
12753 
12754     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12755         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
12756                           arg3, arg4, arg5, arg6);
12757     }
12758 
12759     record_syscall_return(cpu, num, ret);
12760     return ret;
12761 }
12762