xref: /openbmc/qemu/linux-user/syscall.c (revision dcbcf5cf)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
118 #endif
119 #include "linux_loop.h"
120 #include "uname.h"
121 
122 #include "qemu.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
128 #include "tcg/tcg.h"
129 
130 #ifndef CLONE_IO
131 #define CLONE_IO                0x80000000      /* Clone io context */
132 #endif
133 
134 /* We can't directly call the host clone syscall, because this will
135  * badly confuse libc (breaking mutexes, for example). So we must
136  * divide clone flags into:
137  *  * flag combinations that look like pthread_create()
138  *  * flag combinations that look like fork()
139  *  * flags we can implement within QEMU itself
140  *  * flags we can't support and will return an error for
141  */
142 /* For thread creation, all these flags must be present; for
143  * fork, none must be present.
144  */
145 #define CLONE_THREAD_FLAGS                              \
146     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
147      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
148 
149 /* These flags are ignored:
150  * CLONE_DETACHED is now ignored by the kernel;
151  * CLONE_IO is just an optimisation hint to the I/O scheduler
152  */
153 #define CLONE_IGNORED_FLAGS                     \
154     (CLONE_DETACHED | CLONE_IO)
155 
156 /* Flags for fork which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_FORK_FLAGS               \
158     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
159      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
160 
161 /* Flags for thread creation which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
163     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
164      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
165 
166 #define CLONE_INVALID_FORK_FLAGS                                        \
167     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
168 
169 #define CLONE_INVALID_THREAD_FLAGS                                      \
170     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
171        CLONE_IGNORED_FLAGS))
172 
173 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
174  * have almost all been allocated. We cannot support any of
175  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
176  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
177  * The checks against the invalid thread masks above will catch these.
178  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
179  */
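
/*
 * Illustrative sketch only (not part of the emulation): with the masks
 * above, a clone request can be classified roughly the way do_fork() does:
 *
 *   if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
 *       !(flags & CLONE_INVALID_THREAD_FLAGS)) {
 *       ... treat it like pthread_create() ...
 *   } else if (!(flags & CLONE_THREAD_FLAGS) &&
 *              !(flags & CLONE_INVALID_FORK_FLAGS)) {
 *       ... treat it like fork() ...
 *   } else {
 *       ... unsupported combination, fail with an error ...
 *   }
 */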
180 
181 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
182  * once. This exercises the codepaths for restart.
183  */
184 //#define DEBUG_ERESTARTSYS
185 
186 //#include <linux/msdos_fs.h>
187 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
188 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
189 
190 #undef _syscall0
191 #undef _syscall1
192 #undef _syscall2
193 #undef _syscall3
194 #undef _syscall4
195 #undef _syscall5
196 #undef _syscall6
197 
198 #define _syscall0(type,name)		\
199 static type name (void)			\
200 {					\
201 	return syscall(__NR_##name);	\
202 }
203 
204 #define _syscall1(type,name,type1,arg1)		\
205 static type name (type1 arg1)			\
206 {						\
207 	return syscall(__NR_##name, arg1);	\
208 }
209 
210 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
211 static type name (type1 arg1,type2 arg2)		\
212 {							\
213 	return syscall(__NR_##name, arg1, arg2);	\
214 }
215 
216 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
217 static type name (type1 arg1,type2 arg2,type3 arg3)		\
218 {								\
219 	return syscall(__NR_##name, arg1, arg2, arg3);		\
220 }
221 
222 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
224 {										\
225 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
226 }
227 
228 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
229 		  type5,arg5)							\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
233 }
234 
235 
236 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
237 		  type5,arg5,type6,arg6)					\
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
239                   type6 arg6)							\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
242 }
243 
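/*
 * For orientation, an example expansion (purely illustrative):
 * "_syscall0(int, sys_gettid)" used below expands to
 *
 *   static int sys_gettid(void)
 *   {
 *       return syscall(__NR_sys_gettid);
 *   }
 *
 * where __NR_sys_gettid is #defined to the host __NR_gettid further down.
 */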
244 
245 #define __NR_sys_uname __NR_uname
246 #define __NR_sys_getcwd1 __NR_getcwd
247 #define __NR_sys_getdents __NR_getdents
248 #define __NR_sys_getdents64 __NR_getdents64
249 #define __NR_sys_getpriority __NR_getpriority
250 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
251 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
252 #define __NR_sys_syslog __NR_syslog
253 #if defined(__NR_futex)
254 # define __NR_sys_futex __NR_futex
255 #endif
256 #if defined(__NR_futex_time64)
257 # define __NR_sys_futex_time64 __NR_futex_time64
258 #endif
259 #define __NR_sys_inotify_init __NR_inotify_init
260 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
261 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
262 #define __NR_sys_statx __NR_statx
263 
264 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
265 #define __NR__llseek __NR_lseek
266 #endif
267 
268 /* Newer kernel ports have llseek() instead of _llseek() */
269 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
270 #define TARGET_NR__llseek TARGET_NR_llseek
271 #endif
272 
273 #define __NR_sys_gettid __NR_gettid
274 _syscall0(int, sys_gettid)
275 
276 /* For the 64-bit guest on 32-bit host case we must emulate
277  * getdents using getdents64, because otherwise the host
278  * might hand us back more dirent records than we can fit
279  * into the guest buffer after structure format conversion.
280  * Otherwise we emulate the guest getdents using the host getdents if the host has it.
281  */
282 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
283 #define EMULATE_GETDENTS_WITH_GETDENTS
284 #endif
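
/*
 * Worked example (illustrative): for a 64-bit guest on a 32-bit host,
 * HOST_LONG_BITS (32) < TARGET_ABI_BITS (64), so the macro above stays
 * undefined and guest getdents is emulated via the host getdents64;
 * on a 64-bit host with __NR_getdents it is defined and the host
 * getdents syscall is used directly.
 */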
285 
286 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
287 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
288 #endif
289 #if (defined(TARGET_NR_getdents) && \
290       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
291     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
292 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
293 #endif
294 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
295 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
296           loff_t *, res, uint, wh);
297 #endif
298 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
299 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
300           siginfo_t *, uinfo)
301 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
302 #ifdef __NR_exit_group
303 _syscall1(int,exit_group,int,error_code)
304 #endif
305 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
306 _syscall1(int,set_tid_address,int *,tidptr)
307 #endif
308 #if defined(__NR_futex)
309 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
310           const struct timespec *,timeout,int *,uaddr2,int,val3)
311 #endif
312 #if defined(__NR_futex_time64)
313 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
314           const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #endif
316 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
317 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
318           unsigned long *, user_mask_ptr);
319 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
320 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
321           unsigned long *, user_mask_ptr);
322 #define __NR_sys_getcpu __NR_getcpu
323 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
324 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
325           void *, arg);
326 _syscall2(int, capget, struct __user_cap_header_struct *, header,
327           struct __user_cap_data_struct *, data);
328 _syscall2(int, capset, struct __user_cap_header_struct *, header,
329           struct __user_cap_data_struct *, data);
330 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
331 _syscall2(int, ioprio_get, int, which, int, who)
332 #endif
333 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
334 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
335 #endif
336 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
337 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
338 #endif
339 
340 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
341 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
342           unsigned long, idx1, unsigned long, idx2)
343 #endif
344 
345 /*
346  * It is assumed that struct statx is architecture independent.
347  */
348 #if defined(TARGET_NR_statx) && defined(__NR_statx)
349 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
350           unsigned int, mask, struct target_statx *, statxbuf)
351 #endif
352 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
353 _syscall2(int, membarrier, int, cmd, int, flags)
354 #endif
355 
356 static bitmask_transtbl fcntl_flags_tbl[] = {
357   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
358   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
359   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
360   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
361   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
362   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
363   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
364   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
365   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
366   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
367   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
368   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
369   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
370 #if defined(O_DIRECT)
371   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
372 #endif
373 #if defined(O_NOATIME)
374   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
375 #endif
376 #if defined(O_CLOEXEC)
377   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
378 #endif
379 #if defined(O_PATH)
380   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
381 #endif
382 #if defined(O_TMPFILE)
383   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
384 #endif
385   /* Don't terminate the list prematurely on 64-bit host+guest.  */
386 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
387   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
388 #endif
389   { 0, 0, 0, 0 }
390 };
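
/*
 * Usage sketch (assuming QEMU's target_to_host_bitmask() helper from
 * qemu.h): each row is { target_mask, target_bits, host_mask, host_bits },
 * so a guest open() flags word can be translated with something like
 *
 *   int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * which would map e.g. TARGET_O_WRONLY | TARGET_O_CREAT to
 * O_WRONLY | O_CREAT on the host.
 */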
391 
392 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
393 
394 #ifdef TARGET_NR_utimensat
395 #if defined(__NR_utimensat)
396 #define __NR_sys_utimensat __NR_utimensat
397 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
398           const struct timespec *,tsp,int,flags)
399 #else
400 static int sys_utimensat(int dirfd, const char *pathname,
401                          const struct timespec times[2], int flags)
402 {
403     errno = ENOSYS;
404     return -1;
405 }
406 #endif
407 #endif /* TARGET_NR_utimensat */
408 
409 #ifdef TARGET_NR_renameat2
410 #if defined(__NR_renameat2)
411 #define __NR_sys_renameat2 __NR_renameat2
412 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
413           const char *, new, unsigned int, flags)
414 #else
415 static int sys_renameat2(int oldfd, const char *old,
416                          int newfd, const char *new, int flags)
417 {
418     if (flags == 0) {
419         return renameat(oldfd, old, newfd, new);
420     }
421     errno = ENOSYS;
422     return -1;
423 }
424 #endif
425 #endif /* TARGET_NR_renameat2 */
426 
427 #ifdef CONFIG_INOTIFY
428 #include <sys/inotify.h>
429 
430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
431 static int sys_inotify_init(void)
432 {
433   return (inotify_init());
434 }
435 #endif
436 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
437 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
438 {
439   return (inotify_add_watch(fd, pathname, mask));
440 }
441 #endif
442 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
443 static int sys_inotify_rm_watch(int fd, int32_t wd)
444 {
445   return (inotify_rm_watch(fd, wd));
446 }
447 #endif
448 #ifdef CONFIG_INOTIFY1
449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
450 static int sys_inotify_init1(int flags)
451 {
452   return (inotify_init1(flags));
453 }
454 #endif
455 #endif
456 #else
457 /* Userspace can usually survive runtime without inotify */
458 #undef TARGET_NR_inotify_init
459 #undef TARGET_NR_inotify_init1
460 #undef TARGET_NR_inotify_add_watch
461 #undef TARGET_NR_inotify_rm_watch
462 #endif /* CONFIG_INOTIFY  */
463 
464 #if defined(TARGET_NR_prlimit64)
465 #ifndef __NR_prlimit64
466 # define __NR_prlimit64 -1
467 #endif
468 #define __NR_sys_prlimit64 __NR_prlimit64
469 /* The glibc rlimit structure may not match the one used by the underlying syscall */
470 struct host_rlimit64 {
471     uint64_t rlim_cur;
472     uint64_t rlim_max;
473 };
474 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
475           const struct host_rlimit64 *, new_limit,
476           struct host_rlimit64 *, old_limit)
477 #endif
478 
479 
480 #if defined(TARGET_NR_timer_create)
481 /* Maximum of 32 active POSIX timers allowed at any one time. */
482 static timer_t g_posix_timers[32] = { 0, } ;
483 
484 static inline int next_free_host_timer(void)
485 {
486     int k;
487     /* FIXME: Does finding the next free slot require a lock? */
488     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
489         if (g_posix_timers[k] == 0) {
490             g_posix_timers[k] = (timer_t) 1;
491             return k;
492         }
493     }
494     return -1;
495 }
496 #endif
497 
498 #define ERRNO_TABLE_SIZE 1200
499 
500 /* target_to_host_errno_table[] is initialized from
501  * host_to_target_errno_table[] in syscall_init(). */
502 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
503 };
504 
505 /*
506  * This list is the union of errno values overridden in asm-<arch>/errno.h
507  * minus the errnos that are not actually generic to all archs.
508  */
509 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
510     [EAGAIN]		= TARGET_EAGAIN,
511     [EIDRM]		= TARGET_EIDRM,
512     [ECHRNG]		= TARGET_ECHRNG,
513     [EL2NSYNC]		= TARGET_EL2NSYNC,
514     [EL3HLT]		= TARGET_EL3HLT,
515     [EL3RST]		= TARGET_EL3RST,
516     [ELNRNG]		= TARGET_ELNRNG,
517     [EUNATCH]		= TARGET_EUNATCH,
518     [ENOCSI]		= TARGET_ENOCSI,
519     [EL2HLT]		= TARGET_EL2HLT,
520     [EDEADLK]		= TARGET_EDEADLK,
521     [ENOLCK]		= TARGET_ENOLCK,
522     [EBADE]		= TARGET_EBADE,
523     [EBADR]		= TARGET_EBADR,
524     [EXFULL]		= TARGET_EXFULL,
525     [ENOANO]		= TARGET_ENOANO,
526     [EBADRQC]		= TARGET_EBADRQC,
527     [EBADSLT]		= TARGET_EBADSLT,
528     [EBFONT]		= TARGET_EBFONT,
529     [ENOSTR]		= TARGET_ENOSTR,
530     [ENODATA]		= TARGET_ENODATA,
531     [ETIME]		= TARGET_ETIME,
532     [ENOSR]		= TARGET_ENOSR,
533     [ENONET]		= TARGET_ENONET,
534     [ENOPKG]		= TARGET_ENOPKG,
535     [EREMOTE]		= TARGET_EREMOTE,
536     [ENOLINK]		= TARGET_ENOLINK,
537     [EADV]		= TARGET_EADV,
538     [ESRMNT]		= TARGET_ESRMNT,
539     [ECOMM]		= TARGET_ECOMM,
540     [EPROTO]		= TARGET_EPROTO,
541     [EDOTDOT]		= TARGET_EDOTDOT,
542     [EMULTIHOP]		= TARGET_EMULTIHOP,
543     [EBADMSG]		= TARGET_EBADMSG,
544     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
545     [EOVERFLOW]		= TARGET_EOVERFLOW,
546     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
547     [EBADFD]		= TARGET_EBADFD,
548     [EREMCHG]		= TARGET_EREMCHG,
549     [ELIBACC]		= TARGET_ELIBACC,
550     [ELIBBAD]		= TARGET_ELIBBAD,
551     [ELIBSCN]		= TARGET_ELIBSCN,
552     [ELIBMAX]		= TARGET_ELIBMAX,
553     [ELIBEXEC]		= TARGET_ELIBEXEC,
554     [EILSEQ]		= TARGET_EILSEQ,
555     [ENOSYS]		= TARGET_ENOSYS,
556     [ELOOP]		= TARGET_ELOOP,
557     [ERESTART]		= TARGET_ERESTART,
558     [ESTRPIPE]		= TARGET_ESTRPIPE,
559     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
560     [EUSERS]		= TARGET_EUSERS,
561     [ENOTSOCK]		= TARGET_ENOTSOCK,
562     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
563     [EMSGSIZE]		= TARGET_EMSGSIZE,
564     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
565     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
566     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
567     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
568     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
569     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
570     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
571     [EADDRINUSE]	= TARGET_EADDRINUSE,
572     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
573     [ENETDOWN]		= TARGET_ENETDOWN,
574     [ENETUNREACH]	= TARGET_ENETUNREACH,
575     [ENETRESET]		= TARGET_ENETRESET,
576     [ECONNABORTED]	= TARGET_ECONNABORTED,
577     [ECONNRESET]	= TARGET_ECONNRESET,
578     [ENOBUFS]		= TARGET_ENOBUFS,
579     [EISCONN]		= TARGET_EISCONN,
580     [ENOTCONN]		= TARGET_ENOTCONN,
581     [EUCLEAN]		= TARGET_EUCLEAN,
582     [ENOTNAM]		= TARGET_ENOTNAM,
583     [ENAVAIL]		= TARGET_ENAVAIL,
584     [EISNAM]		= TARGET_EISNAM,
585     [EREMOTEIO]		= TARGET_EREMOTEIO,
586     [EDQUOT]            = TARGET_EDQUOT,
587     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
588     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
589     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
590     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
591     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
592     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
593     [EALREADY]		= TARGET_EALREADY,
594     [EINPROGRESS]	= TARGET_EINPROGRESS,
595     [ESTALE]		= TARGET_ESTALE,
596     [ECANCELED]		= TARGET_ECANCELED,
597     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
598     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
599 #ifdef ENOKEY
600     [ENOKEY]		= TARGET_ENOKEY,
601 #endif
602 #ifdef EKEYEXPIRED
603     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
604 #endif
605 #ifdef EKEYREVOKED
606     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
607 #endif
608 #ifdef EKEYREJECTED
609     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
610 #endif
611 #ifdef EOWNERDEAD
612     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
613 #endif
614 #ifdef ENOTRECOVERABLE
615     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
616 #endif
617 #ifdef ENOMSG
618     [ENOMSG]            = TARGET_ENOMSG,
619 #endif
620 #ifdef ERFKILL
621     [ERFKILL]           = TARGET_ERFKILL,
622 #endif
623 #ifdef EHWPOISON
624     [EHWPOISON]         = TARGET_EHWPOISON,
625 #endif
626 };
627 
628 static inline int host_to_target_errno(int err)
629 {
630     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
631         host_to_target_errno_table[err]) {
632         return host_to_target_errno_table[err];
633     }
634     return err;
635 }
636 
637 static inline int target_to_host_errno(int err)
638 {
639     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
640         target_to_host_errno_table[err]) {
641         return target_to_host_errno_table[err];
642     }
643     return err;
644 }
645 
646 static inline abi_long get_errno(abi_long ret)
647 {
648     if (ret == -1)
649         return -host_to_target_errno(errno);
650     else
651         return ret;
652 }
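
/*
 * Illustrative flow: a failing host call such as
 *
 *   ret = get_errno(open("/nonexistent", O_RDONLY));
 *
 * returns -1 with errno set (say ENOENT), so get_errno() yields
 * -host_to_target_errno(ENOENT), i.e. the negated guest errno that the
 * emulated syscall should report.
 */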
653 
654 const char *target_strerror(int err)
655 {
656     if (err == TARGET_ERESTARTSYS) {
657         return "To be restarted";
658     }
659     if (err == TARGET_QEMU_ESIGRETURN) {
660         return "Successful exit from sigreturn";
661     }
662 
663     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
664         return NULL;
665     }
666     return strerror(target_to_host_errno(err));
667 }
668 
669 #define safe_syscall0(type, name) \
670 static type safe_##name(void) \
671 { \
672     return safe_syscall(__NR_##name); \
673 }
674 
675 #define safe_syscall1(type, name, type1, arg1) \
676 static type safe_##name(type1 arg1) \
677 { \
678     return safe_syscall(__NR_##name, arg1); \
679 }
680 
681 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
682 static type safe_##name(type1 arg1, type2 arg2) \
683 { \
684     return safe_syscall(__NR_##name, arg1, arg2); \
685 }
686 
687 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
689 { \
690     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
691 }
692 
693 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
694     type4, arg4) \
695 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
696 { \
697     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
698 }
699 
700 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
701     type4, arg4, type5, arg5) \
702 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
703     type5 arg5) \
704 { \
705     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
706 }
707 
708 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
709     type4, arg4, type5, arg5, type6, arg6) \
710 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
711     type5 arg5, type6 arg6) \
712 { \
713     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
714 }
715 
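/*
 * Example expansion (for orientation only): the safe_syscall3() line for
 * read below becomes
 *
 *   static ssize_t safe_read(int fd, void *buff, size_t count)
 *   {
 *       return safe_syscall(__NR_read, fd, buff, count);
 *   }
 *
 * i.e. the blocking call goes through safe_syscall(), QEMU's restart-aware
 * syscall wrapper.
 */
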
716 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
717 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
718 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
719               int, flags, mode_t, mode)
720 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
721 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
722               struct rusage *, rusage)
723 #endif
724 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
725               int, options, struct rusage *, rusage)
726 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
727 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
728     defined(TARGET_NR_pselect6)
729 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
730               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
731 #endif
732 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
733 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
734               struct timespec *, tsp, const sigset_t *, sigmask,
735               size_t, sigsetsize)
736 #endif
737 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
738               int, maxevents, int, timeout, const sigset_t *, sigmask,
739               size_t, sigsetsize)
740 #if defined(__NR_futex)
741 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
742               const struct timespec *,timeout,int *,uaddr2,int,val3)
743 #endif
744 #if defined(__NR_futex_time64)
745 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
746               const struct timespec *,timeout,int *,uaddr2,int,val3)
747 #endif
748 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
749 safe_syscall2(int, kill, pid_t, pid, int, sig)
750 safe_syscall2(int, tkill, int, tid, int, sig)
751 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
752 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
753 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
754 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
755               unsigned long, pos_l, unsigned long, pos_h)
756 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
757               unsigned long, pos_l, unsigned long, pos_h)
758 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
759               socklen_t, addrlen)
760 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
761               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
762 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
763               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
764 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
765 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
766 safe_syscall2(int, flock, int, fd, int, operation)
767 #ifdef TARGET_NR_rt_sigtimedwait
768 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
769               const struct timespec *, uts, size_t, sigsetsize)
770 #endif
771 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
772               int, flags)
773 #if defined(TARGET_NR_nanosleep)
774 safe_syscall2(int, nanosleep, const struct timespec *, req,
775               struct timespec *, rem)
776 #endif
777 #ifdef TARGET_NR_clock_nanosleep
778 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
779               const struct timespec *, req, struct timespec *, rem)
780 #endif
781 #ifdef __NR_ipc
782 #ifdef __s390x__
783 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
784               void *, ptr)
785 #else
786 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
787               void *, ptr, long, fifth)
788 #endif
789 #endif
790 #ifdef __NR_msgsnd
791 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
792               int, flags)
793 #endif
794 #ifdef __NR_msgrcv
795 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
796               long, msgtype, int, flags)
797 #endif
798 #ifdef __NR_semtimedop
799 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
800               unsigned, nsops, const struct timespec *, timeout)
801 #endif
802 #ifdef TARGET_NR_mq_timedsend
803 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
804               size_t, len, unsigned, prio, const struct timespec *, timeout)
805 #endif
806 #ifdef TARGET_NR_mq_timedreceive
807 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
808               size_t, len, unsigned *, prio, const struct timespec *, timeout)
809 #endif
810 /* We do ioctl like this rather than via safe_syscall3 to preserve the
811  * "third argument might be integer or pointer or not present" behaviour of
812  * the libc function.
813  */
814 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
815 /* Similarly for fcntl. Note that callers must always:
816  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
817  *  use the flock64 struct rather than unsuffixed flock
818  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
819  */
820 #ifdef __NR_fcntl64
821 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
822 #else
823 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
824 #endif
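
/*
 * Usage example for the note above (illustrative): a 64-bit-offset lock
 * query always uses the suffixed command and structure,
 *
 *   struct flock64 fl64;
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * which behaves the same on 32-bit and 64-bit hosts.
 */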
825 
826 static inline int host_to_target_sock_type(int host_type)
827 {
828     int target_type;
829 
830     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
831     case SOCK_DGRAM:
832         target_type = TARGET_SOCK_DGRAM;
833         break;
834     case SOCK_STREAM:
835         target_type = TARGET_SOCK_STREAM;
836         break;
837     default:
838         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
839         break;
840     }
841 
842 #if defined(SOCK_CLOEXEC)
843     if (host_type & SOCK_CLOEXEC) {
844         target_type |= TARGET_SOCK_CLOEXEC;
845     }
846 #endif
847 
848 #if defined(SOCK_NONBLOCK)
849     if (host_type & SOCK_NONBLOCK) {
850         target_type |= TARGET_SOCK_NONBLOCK;
851     }
852 #endif
853 
854     return target_type;
855 }
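
/*
 * Example (illustrative): a host value of SOCK_STREAM | SOCK_CLOEXEC maps
 * to TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC; base types other than
 * SOCK_DGRAM and SOCK_STREAM are passed through unchanged.
 */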
856 
857 static abi_ulong target_brk;
858 static abi_ulong target_original_brk;
859 static abi_ulong brk_page;
860 
861 void target_set_brk(abi_ulong new_brk)
862 {
863     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
864     brk_page = HOST_PAGE_ALIGN(target_brk);
865 }
866 
867 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
868 #define DEBUGF_BRK(message, args...)
869 
870 /* do_brk() must return target values and target errnos. */
871 abi_long do_brk(abi_ulong new_brk)
872 {
873     abi_long mapped_addr;
874     abi_ulong new_alloc_size;
875 
876     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
877 
878     if (!new_brk) {
879         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
880         return target_brk;
881     }
882     if (new_brk < target_original_brk) {
883         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
884                    target_brk);
885         return target_brk;
886     }
887 
888     /* If the new brk is less than the highest page reserved to the
889      * target heap allocation, set it and we're almost done...  */
890     if (new_brk <= brk_page) {
891         /* Heap contents are initialized to zero, as for anonymous
892          * mapped pages.  */
893         if (new_brk > target_brk) {
894             memset(g2h(target_brk), 0, new_brk - target_brk);
895         }
896 	target_brk = new_brk;
897         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
898 	return target_brk;
899     }
900 
901     /* We need to allocate more memory after the brk... Note that
902      * we don't use MAP_FIXED because that will map over the top of
903      * any existing mapping (like the one with the host libc or qemu
904      * itself); instead we treat "mapped but at wrong address" as
905      * a failure and unmap again.
906      */
907     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
908     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
909                                         PROT_READ|PROT_WRITE,
910                                         MAP_ANON|MAP_PRIVATE, 0, 0));
911 
912     if (mapped_addr == brk_page) {
913         /* Heap contents are initialized to zero, as for anonymous
914          * mapped pages.  Technically the new pages are already
915          * initialized to zero since they *are* anonymous mapped
916          * pages, however we have to take care with the contents that
917          * come from the remaining part of the previous page: it may
918          * contain garbage data due to previous heap usage (grown
919          * then shrunk).  */
920         memset(g2h(target_brk), 0, brk_page - target_brk);
921 
922         target_brk = new_brk;
923         brk_page = HOST_PAGE_ALIGN(target_brk);
924         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
925             target_brk);
926         return target_brk;
927     } else if (mapped_addr != -1) {
928         /* Mapped but at wrong address, meaning there wasn't actually
929          * enough space for this brk.
930          */
931         target_munmap(mapped_addr, new_alloc_size);
932         mapped_addr = -1;
933         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
934     }
935     else {
936         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
937     }
938 
939 #if defined(TARGET_ALPHA)
940     /* We (partially) emulate OSF/1 on Alpha, which requires we
941        return a proper errno, not an unchanged brk value.  */
942     return -TARGET_ENOMEM;
943 #endif
944     /* For everything else, return the previous break. */
945     return target_brk;
946 }
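
/*
 * Worked example (numbers purely illustrative): with 4 KiB host pages,
 * target_brk == brk_page == 0x41000 and a request for new_brk = 0x43234,
 * new_alloc_size is HOST_PAGE_ALIGN(0x43234 - 0x41000) = 0x3000; if the
 * mapping lands at brk_page, target_brk becomes 0x43234 and brk_page
 * becomes HOST_PAGE_ALIGN(0x43234) = 0x44000.
 */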
947 
948 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
949     defined(TARGET_NR_pselect6)
950 static inline abi_long copy_from_user_fdset(fd_set *fds,
951                                             abi_ulong target_fds_addr,
952                                             int n)
953 {
954     int i, nw, j, k;
955     abi_ulong b, *target_fds;
956 
957     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
958     if (!(target_fds = lock_user(VERIFY_READ,
959                                  target_fds_addr,
960                                  sizeof(abi_ulong) * nw,
961                                  1)))
962         return -TARGET_EFAULT;
963 
964     FD_ZERO(fds);
965     k = 0;
966     for (i = 0; i < nw; i++) {
967         /* grab the abi_ulong */
968         __get_user(b, &target_fds[i]);
969         for (j = 0; j < TARGET_ABI_BITS; j++) {
970             /* check the bit inside the abi_ulong */
971             if ((b >> j) & 1)
972                 FD_SET(k, fds);
973             k++;
974         }
975     }
976 
977     unlock_user(target_fds, target_fds_addr, 0);
978 
979     return 0;
980 }
981 
982 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
983                                                  abi_ulong target_fds_addr,
984                                                  int n)
985 {
986     if (target_fds_addr) {
987         if (copy_from_user_fdset(fds, target_fds_addr, n))
988             return -TARGET_EFAULT;
989         *fds_ptr = fds;
990     } else {
991         *fds_ptr = NULL;
992     }
993     return 0;
994 }
995 
996 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
997                                           const fd_set *fds,
998                                           int n)
999 {
1000     int i, nw, j, k;
1001     abi_long v;
1002     abi_ulong *target_fds;
1003 
1004     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1005     if (!(target_fds = lock_user(VERIFY_WRITE,
1006                                  target_fds_addr,
1007                                  sizeof(abi_ulong) * nw,
1008                                  0)))
1009         return -TARGET_EFAULT;
1010 
1011     k = 0;
1012     for (i = 0; i < nw; i++) {
1013         v = 0;
1014         for (j = 0; j < TARGET_ABI_BITS; j++) {
1015             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1016             k++;
1017         }
1018         __put_user(v, &target_fds[i]);
1019     }
1020 
1021     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1022 
1023     return 0;
1024 }
1025 #endif
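
/*
 * Bit-layout example (illustrative): with TARGET_ABI_BITS == 32, guest
 * fd 33 is word 1, bit 1 of the target fd_set, so copy_from_user_fdset()
 * sets FD_SET(33, fds) when that bit is set and copy_to_user_fdset()
 * packs it back into the same position.
 */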
1026 
1027 #if defined(__alpha__)
1028 #define HOST_HZ 1024
1029 #else
1030 #define HOST_HZ 100
1031 #endif
1032 
1033 static inline abi_long host_to_target_clock_t(long ticks)
1034 {
1035 #if HOST_HZ == TARGET_HZ
1036     return ticks;
1037 #else
1038     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1039 #endif
1040 }
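
/*
 * Worked example: on an Alpha host (HOST_HZ == 1024) emulating a target
 * with TARGET_HZ == 100, 2048 host ticks become (2048 * 100) / 1024 = 200
 * target ticks.
 */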
1041 
1042 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1043                                              const struct rusage *rusage)
1044 {
1045     struct target_rusage *target_rusage;
1046 
1047     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1048         return -TARGET_EFAULT;
1049     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1050     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1051     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1052     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1053     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1054     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1055     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1056     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1057     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1058     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1059     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1060     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1061     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1062     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1063     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1064     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1065     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1066     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1067     unlock_user_struct(target_rusage, target_addr, 1);
1068 
1069     return 0;
1070 }
1071 
1072 #ifdef TARGET_NR_setrlimit
1073 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1074 {
1075     abi_ulong target_rlim_swap;
1076     rlim_t result;
1077 
1078     target_rlim_swap = tswapal(target_rlim);
1079     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1080         return RLIM_INFINITY;
1081 
1082     result = target_rlim_swap;
1083     if (target_rlim_swap != (rlim_t)result)
1084         return RLIM_INFINITY;
1085 
1086     return result;
1087 }
1088 #endif
1089 
1090 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1091 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1092 {
1093     abi_ulong target_rlim_swap;
1094     abi_ulong result;
1095 
1096     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1097         target_rlim_swap = TARGET_RLIM_INFINITY;
1098     else
1099         target_rlim_swap = rlim;
1100     result = tswapal(target_rlim_swap);
1101 
1102     return result;
1103 }
1104 #endif
1105 
1106 static inline int target_to_host_resource(int code)
1107 {
1108     switch (code) {
1109     case TARGET_RLIMIT_AS:
1110         return RLIMIT_AS;
1111     case TARGET_RLIMIT_CORE:
1112         return RLIMIT_CORE;
1113     case TARGET_RLIMIT_CPU:
1114         return RLIMIT_CPU;
1115     case TARGET_RLIMIT_DATA:
1116         return RLIMIT_DATA;
1117     case TARGET_RLIMIT_FSIZE:
1118         return RLIMIT_FSIZE;
1119     case TARGET_RLIMIT_LOCKS:
1120         return RLIMIT_LOCKS;
1121     case TARGET_RLIMIT_MEMLOCK:
1122         return RLIMIT_MEMLOCK;
1123     case TARGET_RLIMIT_MSGQUEUE:
1124         return RLIMIT_MSGQUEUE;
1125     case TARGET_RLIMIT_NICE:
1126         return RLIMIT_NICE;
1127     case TARGET_RLIMIT_NOFILE:
1128         return RLIMIT_NOFILE;
1129     case TARGET_RLIMIT_NPROC:
1130         return RLIMIT_NPROC;
1131     case TARGET_RLIMIT_RSS:
1132         return RLIMIT_RSS;
1133     case TARGET_RLIMIT_RTPRIO:
1134         return RLIMIT_RTPRIO;
1135     case TARGET_RLIMIT_SIGPENDING:
1136         return RLIMIT_SIGPENDING;
1137     case TARGET_RLIMIT_STACK:
1138         return RLIMIT_STACK;
1139     default:
1140         return code;
1141     }
1142 }
1143 
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145                                               abi_ulong target_tv_addr)
1146 {
1147     struct target_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1150         return -TARGET_EFAULT;
1151     }
1152 
1153     __get_user(tv->tv_sec, &target_tv->tv_sec);
1154     __get_user(tv->tv_usec, &target_tv->tv_usec);
1155 
1156     unlock_user_struct(target_tv, target_tv_addr, 0);
1157 
1158     return 0;
1159 }
1160 
1161 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1162                                             const struct timeval *tv)
1163 {
1164     struct target_timeval *target_tv;
1165 
1166     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1167         return -TARGET_EFAULT;
1168     }
1169 
1170     __put_user(tv->tv_sec, &target_tv->tv_sec);
1171     __put_user(tv->tv_usec, &target_tv->tv_usec);
1172 
1173     unlock_user_struct(target_tv, target_tv_addr, 1);
1174 
1175     return 0;
1176 }
1177 
1178 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1179                                              const struct timeval *tv)
1180 {
1181     struct target__kernel_sock_timeval *target_tv;
1182 
1183     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1184         return -TARGET_EFAULT;
1185     }
1186 
1187     __put_user(tv->tv_sec, &target_tv->tv_sec);
1188     __put_user(tv->tv_usec, &target_tv->tv_usec);
1189 
1190     unlock_user_struct(target_tv, target_tv_addr, 1);
1191 
1192     return 0;
1193 }
1194 
1195 #if defined(TARGET_NR_futex) || \
1196     defined(TARGET_NR_rt_sigtimedwait) || \
1197     defined(TARGET_NR_pselect6) || \
1198     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1199     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1200     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1201     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1202     defined(TARGET_NR_timer_settime) || \
1203     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1204 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1205                                                abi_ulong target_addr)
1206 {
1207     struct target_timespec *target_ts;
1208 
1209     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1210         return -TARGET_EFAULT;
1211     }
1212     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1213     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1214     unlock_user_struct(target_ts, target_addr, 0);
1215     return 0;
1216 }
1217 #endif
1218 
1219 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1220     defined(TARGET_NR_timer_settime64) || \
1221     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
1222 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1223                                                  abi_ulong target_addr)
1224 {
1225     struct target__kernel_timespec *target_ts;
1226 
1227     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1228         return -TARGET_EFAULT;
1229     }
1230     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1231     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1232     unlock_user_struct(target_ts, target_addr, 0);
1233     return 0;
1234 }
1235 #endif
1236 
1237 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1238                                                struct timespec *host_ts)
1239 {
1240     struct target_timespec *target_ts;
1241 
1242     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1243         return -TARGET_EFAULT;
1244     }
1245     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1246     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247     unlock_user_struct(target_ts, target_addr, 1);
1248     return 0;
1249 }
1250 
1251 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1252                                                  struct timespec *host_ts)
1253 {
1254     struct target__kernel_timespec *target_ts;
1255 
1256     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1257         return -TARGET_EFAULT;
1258     }
1259     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1260     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1261     unlock_user_struct(target_ts, target_addr, 1);
1262     return 0;
1263 }
1264 
1265 #if defined(TARGET_NR_gettimeofday)
1266 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1267                                              struct timezone *tz)
1268 {
1269     struct target_timezone *target_tz;
1270 
1271     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1272         return -TARGET_EFAULT;
1273     }
1274 
1275     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1276     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1277 
1278     unlock_user_struct(target_tz, target_tz_addr, 1);
1279 
1280     return 0;
1281 }
1282 #endif
1283 
1284 #if defined(TARGET_NR_settimeofday)
1285 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1286                                                abi_ulong target_tz_addr)
1287 {
1288     struct target_timezone *target_tz;
1289 
1290     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1291         return -TARGET_EFAULT;
1292     }
1293 
1294     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1295     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1296 
1297     unlock_user_struct(target_tz, target_tz_addr, 0);
1298 
1299     return 0;
1300 }
1301 #endif
1302 
1303 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1304 #include <mqueue.h>
1305 
1306 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1307                                               abi_ulong target_mq_attr_addr)
1308 {
1309     struct target_mq_attr *target_mq_attr;
1310 
1311     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1312                           target_mq_attr_addr, 1))
1313         return -TARGET_EFAULT;
1314 
1315     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1316     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1317     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1318     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1319 
1320     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1321 
1322     return 0;
1323 }
1324 
1325 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1326                                             const struct mq_attr *attr)
1327 {
1328     struct target_mq_attr *target_mq_attr;
1329 
1330     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1331                           target_mq_attr_addr, 0))
1332         return -TARGET_EFAULT;
1333 
1334     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1335     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1336     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1337     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1338 
1339     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1340 
1341     return 0;
1342 }
1343 #endif
1344 
1345 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1346 /* do_select() must return target values and target errnos. */
1347 static abi_long do_select(int n,
1348                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1349                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1350 {
1351     fd_set rfds, wfds, efds;
1352     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1353     struct timeval tv;
1354     struct timespec ts, *ts_ptr;
1355     abi_long ret;
1356 
1357     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1358     if (ret) {
1359         return ret;
1360     }
1361     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1362     if (ret) {
1363         return ret;
1364     }
1365     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1366     if (ret) {
1367         return ret;
1368     }
1369 
1370     if (target_tv_addr) {
1371         if (copy_from_user_timeval(&tv, target_tv_addr))
1372             return -TARGET_EFAULT;
1373         ts.tv_sec = tv.tv_sec;
1374         ts.tv_nsec = tv.tv_usec * 1000;
1375         ts_ptr = &ts;
1376     } else {
1377         ts_ptr = NULL;
1378     }
1379 
1380     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1381                                   ts_ptr, NULL));
1382 
1383     if (!is_error(ret)) {
1384         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1385             return -TARGET_EFAULT;
1386         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1387             return -TARGET_EFAULT;
1388         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1389             return -TARGET_EFAULT;
1390 
1391         if (target_tv_addr) {
1392             tv.tv_sec = ts.tv_sec;
1393             tv.tv_usec = ts.tv_nsec / 1000;
1394             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1395                 return -TARGET_EFAULT;
1396             }
1397         }
1398     }
1399 
1400     return ret;
1401 }
1402 
1403 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1404 static abi_long do_old_select(abi_ulong arg1)
1405 {
1406     struct target_sel_arg_struct *sel;
1407     abi_ulong inp, outp, exp, tvp;
1408     long nsel;
1409 
1410     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1411         return -TARGET_EFAULT;
1412     }
1413 
1414     nsel = tswapal(sel->n);
1415     inp = tswapal(sel->inp);
1416     outp = tswapal(sel->outp);
1417     exp = tswapal(sel->exp);
1418     tvp = tswapal(sel->tvp);
1419 
1420     unlock_user_struct(sel, arg1, 0);
1421 
1422     return do_select(nsel, inp, outp, exp, tvp);
1423 }
1424 #endif
1425 #endif
1426 
1427 static abi_long do_pipe2(int host_pipe[], int flags)
1428 {
1429 #ifdef CONFIG_PIPE2
1430     return pipe2(host_pipe, flags);
1431 #else
1432     return -ENOSYS;
1433 #endif
1434 }
1435 
1436 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1437                         int flags, int is_pipe2)
1438 {
1439     int host_pipe[2];
1440     abi_long ret;
1441     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1442 
1443     if (is_error(ret))
1444         return get_errno(ret);
1445 
1446     /* Several targets have special calling conventions for the original
1447        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1448     if (!is_pipe2) {
1449 #if defined(TARGET_ALPHA)
1450         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1451         return host_pipe[0];
1452 #elif defined(TARGET_MIPS)
1453         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1454         return host_pipe[0];
1455 #elif defined(TARGET_SH4)
1456         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1457         return host_pipe[0];
1458 #elif defined(TARGET_SPARC)
1459         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1460         return host_pipe[0];
1461 #endif
1462     }
1463 
1464     if (put_user_s32(host_pipe[0], pipedes)
1465         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1466         return -TARGET_EFAULT;
1467     return get_errno(ret);
1468 }
1469 
1470 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1471                                               abi_ulong target_addr,
1472                                               socklen_t len)
1473 {
1474     struct target_ip_mreqn *target_smreqn;
1475 
1476     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1477     if (!target_smreqn)
1478         return -TARGET_EFAULT;
1479     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1480     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1481     if (len == sizeof(struct target_ip_mreqn))
1482         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1483     unlock_user(target_smreqn, target_addr, 0);
1484 
1485     return 0;
1486 }
1487 
1488 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1489                                                abi_ulong target_addr,
1490                                                socklen_t len)
1491 {
1492     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1493     sa_family_t sa_family;
1494     struct target_sockaddr *target_saddr;
1495 
1496     if (fd_trans_target_to_host_addr(fd)) {
1497         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1498     }
1499 
1500     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1501     if (!target_saddr)
1502         return -TARGET_EFAULT;
1503 
1504     sa_family = tswap16(target_saddr->sa_family);
1505 
1506     /* Oops. The caller might send an incomplete sun_path; sun_path
1507      * must be terminated by \0 (see the manual page), but
1508      * unfortunately it is quite common to specify sockaddr_un
1509      * length as "strlen(x->sun_path)" while it should be
1510      * "strlen(...) + 1". We'll fix that here if needed.
1511      * The Linux kernel has a similar feature.
1512      */
1513 
1514     if (sa_family == AF_UNIX) {
1515         if (len < unix_maxlen && len > 0) {
1516             char *cp = (char*)target_saddr;
1517 
1518             if ( cp[len-1] && !cp[len] )
1519                 len++;
1520         }
1521         if (len > unix_maxlen)
1522             len = unix_maxlen;
1523     }
1524 
1525     memcpy(addr, target_saddr, len);
1526     addr->sa_family = sa_family;
1527     if (sa_family == AF_NETLINK) {
1528         struct sockaddr_nl *nladdr;
1529 
1530         nladdr = (struct sockaddr_nl *)addr;
1531         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1532         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1533     } else if (sa_family == AF_PACKET) {
1534 	struct target_sockaddr_ll *lladdr;
1535 
1536 	lladdr = (struct target_sockaddr_ll *)addr;
1537 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1538 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1539     }
1540     unlock_user(target_saddr, target_addr, 0);
1541 
1542     return 0;
1543 }
1544 
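/* Copy a host sockaddr back out to guest memory at target_addr, converting
 * the family field and the byte order of the AF_NETLINK, AF_PACKET and
 * AF_INET6 specific members when the supplied length covers them.
 */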
1545 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1546                                                struct sockaddr *addr,
1547                                                socklen_t len)
1548 {
1549     struct target_sockaddr *target_saddr;
1550 
1551     if (len == 0) {
1552         return 0;
1553     }
1554     assert(addr);
1555 
1556     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1557     if (!target_saddr)
1558         return -TARGET_EFAULT;
1559     memcpy(target_saddr, addr, len);
1560     if (len >= offsetof(struct target_sockaddr, sa_family) +
1561         sizeof(target_saddr->sa_family)) {
1562         target_saddr->sa_family = tswap16(addr->sa_family);
1563     }
1564     if (addr->sa_family == AF_NETLINK &&
1565         len >= sizeof(struct target_sockaddr_nl)) {
1566         struct target_sockaddr_nl *target_nl =
1567                (struct target_sockaddr_nl *)target_saddr;
1568         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1569         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1570     } else if (addr->sa_family == AF_PACKET) {
1571         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1572         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1573         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1574     } else if (addr->sa_family == AF_INET6 &&
1575                len >= sizeof(struct target_sockaddr_in6)) {
1576         struct target_sockaddr_in6 *target_in6 =
1577                (struct target_sockaddr_in6 *)target_saddr;
1578         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1579     }
1580     unlock_user(target_saddr, target_addr, len);
1581 
1582     return 0;
1583 }
1584 
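/* Convert the ancillary data (control messages) of a guest msghdr into the
 * host msghdr prepared by the caller. SCM_RIGHTS and SCM_CREDENTIALS are
 * converted field by field; any other payload is copied verbatim with a
 * LOG_UNIMP warning.
 */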
1585 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1586                                            struct target_msghdr *target_msgh)
1587 {
1588     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1589     abi_long msg_controllen;
1590     abi_ulong target_cmsg_addr;
1591     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1592     socklen_t space = 0;
1593 
1594     msg_controllen = tswapal(target_msgh->msg_controllen);
1595     if (msg_controllen < sizeof (struct target_cmsghdr))
1596         goto the_end;
1597     target_cmsg_addr = tswapal(target_msgh->msg_control);
1598     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1599     target_cmsg_start = target_cmsg;
1600     if (!target_cmsg)
1601         return -TARGET_EFAULT;
1602 
1603     while (cmsg && target_cmsg) {
1604         void *data = CMSG_DATA(cmsg);
1605         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1606 
1607         int len = tswapal(target_cmsg->cmsg_len)
1608             - sizeof(struct target_cmsghdr);
1609 
1610         space += CMSG_SPACE(len);
1611         if (space > msgh->msg_controllen) {
1612             space -= CMSG_SPACE(len);
1613             /* This is a QEMU bug, since we allocated the payload
1614              * area ourselves (unlike overflow in host-to-target
1615              * conversion, which is just the guest giving us a buffer
1616              * that's too small). It can't happen for the payload types
1617              * we currently support; if it becomes an issue in future
1618              * we would need to improve our allocation strategy to
1619              * something more intelligent than "twice the size of the
1620              * target buffer we're reading from".
1621              */
1622             qemu_log_mask(LOG_UNIMP,
1623                           ("Unsupported ancillary data %d/%d: "
1624                            "unhandled msg size\n"),
1625                           tswap32(target_cmsg->cmsg_level),
1626                           tswap32(target_cmsg->cmsg_type));
1627             break;
1628         }
1629 
1630         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1631             cmsg->cmsg_level = SOL_SOCKET;
1632         } else {
1633             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1634         }
1635         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1636         cmsg->cmsg_len = CMSG_LEN(len);
1637 
1638         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1639             int *fd = (int *)data;
1640             int *target_fd = (int *)target_data;
1641             int i, numfds = len / sizeof(int);
1642 
1643             for (i = 0; i < numfds; i++) {
1644                 __get_user(fd[i], target_fd + i);
1645             }
1646         } else if (cmsg->cmsg_level == SOL_SOCKET
1647                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1648             struct ucred *cred = (struct ucred *)data;
1649             struct target_ucred *target_cred =
1650                 (struct target_ucred *)target_data;
1651 
1652             __get_user(cred->pid, &target_cred->pid);
1653             __get_user(cred->uid, &target_cred->uid);
1654             __get_user(cred->gid, &target_cred->gid);
1655         } else {
1656             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1657                           cmsg->cmsg_level, cmsg->cmsg_type);
1658             memcpy(data, target_data, len);
1659         }
1660 
1661         cmsg = CMSG_NXTHDR(msgh, cmsg);
1662         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1663                                          target_cmsg_start);
1664     }
1665     unlock_user(target_cmsg, target_cmsg_addr, 0);
1666  the_end:
1667     msgh->msg_controllen = space;
1668     return 0;
1669 }
1670 
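/* Convert the ancillary data of a host msghdr back into the guest msghdr,
 * truncating the payload (and setting MSG_CTRUNC) when the guest-supplied
 * control buffer is too small for the converted data.
 */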
1671 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1672                                            struct msghdr *msgh)
1673 {
1674     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1675     abi_long msg_controllen;
1676     abi_ulong target_cmsg_addr;
1677     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1678     socklen_t space = 0;
1679 
1680     msg_controllen = tswapal(target_msgh->msg_controllen);
1681     if (msg_controllen < sizeof (struct target_cmsghdr))
1682         goto the_end;
1683     target_cmsg_addr = tswapal(target_msgh->msg_control);
1684     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1685     target_cmsg_start = target_cmsg;
1686     if (!target_cmsg)
1687         return -TARGET_EFAULT;
1688 
1689     while (cmsg && target_cmsg) {
1690         void *data = CMSG_DATA(cmsg);
1691         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1692 
1693         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1694         int tgt_len, tgt_space;
1695 
1696         /* We never copy a half-header but may copy half-data;
1697          * this is Linux's behaviour in put_cmsg(). Note that
1698          * truncation here is a guest problem (which we report
1699          * to the guest via the CTRUNC bit), unlike truncation
1700          * in target_to_host_cmsg, which is a QEMU bug.
1701          */
1702         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1703             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1704             break;
1705         }
1706 
1707         if (cmsg->cmsg_level == SOL_SOCKET) {
1708             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1709         } else {
1710             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1711         }
1712         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1713 
1714         /* Payload types which need a different size of payload on
1715          * the target must adjust tgt_len here.
1716          */
1717         tgt_len = len;
1718         switch (cmsg->cmsg_level) {
1719         case SOL_SOCKET:
1720             switch (cmsg->cmsg_type) {
1721             case SO_TIMESTAMP:
1722                 tgt_len = sizeof(struct target_timeval);
1723                 break;
1724             default:
1725                 break;
1726             }
1727             break;
1728         default:
1729             break;
1730         }
1731 
1732         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1733             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1734             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1735         }
1736 
1737         /* We must now copy-and-convert len bytes of payload
1738          * into tgt_len bytes of destination space. Bear in mind
1739          * that in both source and destination we may be dealing
1740          * with a truncated value!
1741          */
1742         switch (cmsg->cmsg_level) {
1743         case SOL_SOCKET:
1744             switch (cmsg->cmsg_type) {
1745             case SCM_RIGHTS:
1746             {
1747                 int *fd = (int *)data;
1748                 int *target_fd = (int *)target_data;
1749                 int i, numfds = tgt_len / sizeof(int);
1750 
1751                 for (i = 0; i < numfds; i++) {
1752                     __put_user(fd[i], target_fd + i);
1753                 }
1754                 break;
1755             }
1756             case SO_TIMESTAMP:
1757             {
1758                 struct timeval *tv = (struct timeval *)data;
1759                 struct target_timeval *target_tv =
1760                     (struct target_timeval *)target_data;
1761 
1762                 if (len != sizeof(struct timeval) ||
1763                     tgt_len != sizeof(struct target_timeval)) {
1764                     goto unimplemented;
1765                 }
1766 
1767                 /* copy struct timeval to target */
1768                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1769                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1770                 break;
1771             }
1772             case SCM_CREDENTIALS:
1773             {
1774                 struct ucred *cred = (struct ucred *)data;
1775                 struct target_ucred *target_cred =
1776                     (struct target_ucred *)target_data;
1777 
1778                 __put_user(cred->pid, &target_cred->pid);
1779                 __put_user(cred->uid, &target_cred->uid);
1780                 __put_user(cred->gid, &target_cred->gid);
1781                 break;
1782             }
1783             default:
1784                 goto unimplemented;
1785             }
1786             break;
1787 
1788         case SOL_IP:
1789             switch (cmsg->cmsg_type) {
1790             case IP_TTL:
1791             {
1792                 uint32_t *v = (uint32_t *)data;
1793                 uint32_t *t_int = (uint32_t *)target_data;
1794 
1795                 if (len != sizeof(uint32_t) ||
1796                     tgt_len != sizeof(uint32_t)) {
1797                     goto unimplemented;
1798                 }
1799                 __put_user(*v, t_int);
1800                 break;
1801             }
1802             case IP_RECVERR:
1803             {
1804                 struct errhdr_t {
1805                    struct sock_extended_err ee;
1806                    struct sockaddr_in offender;
1807                 };
1808                 struct errhdr_t *errh = (struct errhdr_t *)data;
1809                 struct errhdr_t *target_errh =
1810                     (struct errhdr_t *)target_data;
1811 
1812                 if (len != sizeof(struct errhdr_t) ||
1813                     tgt_len != sizeof(struct errhdr_t)) {
1814                     goto unimplemented;
1815                 }
1816                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1817                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1818                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1819                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1820                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1821                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1822                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1823                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1824                     (void *) &errh->offender, sizeof(errh->offender));
1825                 break;
1826             }
1827             default:
1828                 goto unimplemented;
1829             }
1830             break;
1831 
1832         case SOL_IPV6:
1833             switch (cmsg->cmsg_type) {
1834             case IPV6_HOPLIMIT:
1835             {
1836                 uint32_t *v = (uint32_t *)data;
1837                 uint32_t *t_int = (uint32_t *)target_data;
1838 
1839                 if (len != sizeof(uint32_t) ||
1840                     tgt_len != sizeof(uint32_t)) {
1841                     goto unimplemented;
1842                 }
1843                 __put_user(*v, t_int);
1844                 break;
1845             }
1846             case IPV6_RECVERR:
1847             {
1848                 struct errhdr6_t {
1849                    struct sock_extended_err ee;
1850                    struct sockaddr_in6 offender;
1851                 };
1852                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1853                 struct errhdr6_t *target_errh =
1854                     (struct errhdr6_t *)target_data;
1855 
1856                 if (len != sizeof(struct errhdr6_t) ||
1857                     tgt_len != sizeof(struct errhdr6_t)) {
1858                     goto unimplemented;
1859                 }
1860                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1861                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1862                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1863                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1864                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1865                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1866                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1867                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1868                     (void *) &errh->offender, sizeof(errh->offender));
1869                 break;
1870             }
1871             default:
1872                 goto unimplemented;
1873             }
1874             break;
1875 
1876         default:
1877         unimplemented:
1878             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1879                           cmsg->cmsg_level, cmsg->cmsg_type);
1880             memcpy(target_data, data, MIN(len, tgt_len));
1881             if (tgt_len > len) {
1882                 memset(target_data + len, 0, tgt_len - len);
1883             }
1884         }
1885 
1886         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1887         tgt_space = TARGET_CMSG_SPACE(tgt_len);
1888         if (msg_controllen < tgt_space) {
1889             tgt_space = msg_controllen;
1890         }
1891         msg_controllen -= tgt_space;
1892         space += tgt_space;
1893         cmsg = CMSG_NXTHDR(msgh, cmsg);
1894         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1895                                          target_cmsg_start);
1896     }
1897     unlock_user(target_cmsg, target_cmsg_addr, space);
1898  the_end:
1899     target_msgh->msg_controllen = tswapal(space);
1900     return 0;
1901 }
1902 
1903 /* do_setsockopt() Must return target values and target errnos. */
1904 static abi_long do_setsockopt(int sockfd, int level, int optname,
1905                               abi_ulong optval_addr, socklen_t optlen)
1906 {
1907     abi_long ret;
1908     int val;
1909     struct ip_mreqn *ip_mreq;
1910     struct ip_mreq_source *ip_mreq_source;
1911 
1912     switch(level) {
1913     case SOL_TCP:
1914         /* TCP options all take an 'int' value.  */
1915         if (optlen < sizeof(uint32_t))
1916             return -TARGET_EINVAL;
1917 
1918         if (get_user_u32(val, optval_addr))
1919             return -TARGET_EFAULT;
1920         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1921         break;
1922     case SOL_IP:
1923         switch(optname) {
1924         case IP_TOS:
1925         case IP_TTL:
1926         case IP_HDRINCL:
1927         case IP_ROUTER_ALERT:
1928         case IP_RECVOPTS:
1929         case IP_RETOPTS:
1930         case IP_PKTINFO:
1931         case IP_MTU_DISCOVER:
1932         case IP_RECVERR:
1933         case IP_RECVTTL:
1934         case IP_RECVTOS:
1935 #ifdef IP_FREEBIND
1936         case IP_FREEBIND:
1937 #endif
1938         case IP_MULTICAST_TTL:
1939         case IP_MULTICAST_LOOP:
1940             val = 0;
1941             if (optlen >= sizeof(uint32_t)) {
1942                 if (get_user_u32(val, optval_addr))
1943                     return -TARGET_EFAULT;
1944             } else if (optlen >= 1) {
1945                 if (get_user_u8(val, optval_addr))
1946                     return -TARGET_EFAULT;
1947             }
1948             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1949             break;
1950         case IP_ADD_MEMBERSHIP:
1951         case IP_DROP_MEMBERSHIP:
1952             if (optlen < sizeof (struct target_ip_mreq) ||
1953                 optlen > sizeof (struct target_ip_mreqn))
1954                 return -TARGET_EINVAL;
1955 
1956             ip_mreq = (struct ip_mreqn *) alloca(optlen);
1957             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1958             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1959             break;
1960 
1961         case IP_BLOCK_SOURCE:
1962         case IP_UNBLOCK_SOURCE:
1963         case IP_ADD_SOURCE_MEMBERSHIP:
1964         case IP_DROP_SOURCE_MEMBERSHIP:
1965             if (optlen != sizeof (struct target_ip_mreq_source))
1966                 return -TARGET_EINVAL;
1967 
1968             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1969             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1970             unlock_user (ip_mreq_source, optval_addr, 0);
1971             break;
1972 
1973         default:
1974             goto unimplemented;
1975         }
1976         break;
1977     case SOL_IPV6:
1978         switch (optname) {
1979         case IPV6_MTU_DISCOVER:
1980         case IPV6_MTU:
1981         case IPV6_V6ONLY:
1982         case IPV6_RECVPKTINFO:
1983         case IPV6_UNICAST_HOPS:
1984         case IPV6_MULTICAST_HOPS:
1985         case IPV6_MULTICAST_LOOP:
1986         case IPV6_RECVERR:
1987         case IPV6_RECVHOPLIMIT:
1988         case IPV6_2292HOPLIMIT:
1989         case IPV6_CHECKSUM:
1990         case IPV6_ADDRFORM:
1991         case IPV6_2292PKTINFO:
1992         case IPV6_RECVTCLASS:
1993         case IPV6_RECVRTHDR:
1994         case IPV6_2292RTHDR:
1995         case IPV6_RECVHOPOPTS:
1996         case IPV6_2292HOPOPTS:
1997         case IPV6_RECVDSTOPTS:
1998         case IPV6_2292DSTOPTS:
1999         case IPV6_TCLASS:
2000 #ifdef IPV6_RECVPATHMTU
2001         case IPV6_RECVPATHMTU:
2002 #endif
2003 #ifdef IPV6_TRANSPARENT
2004         case IPV6_TRANSPARENT:
2005 #endif
2006 #ifdef IPV6_FREEBIND
2007         case IPV6_FREEBIND:
2008 #endif
2009 #ifdef IPV6_RECVORIGDSTADDR
2010         case IPV6_RECVORIGDSTADDR:
2011 #endif
2012             val = 0;
2013             if (optlen < sizeof(uint32_t)) {
2014                 return -TARGET_EINVAL;
2015             }
2016             if (get_user_u32(val, optval_addr)) {
2017                 return -TARGET_EFAULT;
2018             }
2019             ret = get_errno(setsockopt(sockfd, level, optname,
2020                                        &val, sizeof(val)));
2021             break;
2022         case IPV6_PKTINFO:
2023         {
2024             struct in6_pktinfo pki;
2025 
2026             if (optlen < sizeof(pki)) {
2027                 return -TARGET_EINVAL;
2028             }
2029 
2030             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2031                 return -TARGET_EFAULT;
2032             }
2033 
2034             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2035 
2036             ret = get_errno(setsockopt(sockfd, level, optname,
2037                                        &pki, sizeof(pki)));
2038             break;
2039         }
2040         case IPV6_ADD_MEMBERSHIP:
2041         case IPV6_DROP_MEMBERSHIP:
2042         {
2043             struct ipv6_mreq ipv6mreq;
2044 
2045             if (optlen < sizeof(ipv6mreq)) {
2046                 return -TARGET_EINVAL;
2047             }
2048 
2049             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2050                 return -TARGET_EFAULT;
2051             }
2052 
2053             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2054 
2055             ret = get_errno(setsockopt(sockfd, level, optname,
2056                                        &ipv6mreq, sizeof(ipv6mreq)));
2057             break;
2058         }
2059         default:
2060             goto unimplemented;
2061         }
2062         break;
2063     case SOL_ICMPV6:
2064         switch (optname) {
2065         case ICMPV6_FILTER:
2066         {
2067             struct icmp6_filter icmp6f;
2068 
2069             if (optlen > sizeof(icmp6f)) {
2070                 optlen = sizeof(icmp6f);
2071             }
2072 
2073             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2074                 return -TARGET_EFAULT;
2075             }
2076 
2077             for (val = 0; val < 8; val++) {
2078                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2079             }
2080 
2081             ret = get_errno(setsockopt(sockfd, level, optname,
2082                                        &icmp6f, optlen));
2083             break;
2084         }
2085         default:
2086             goto unimplemented;
2087         }
2088         break;
2089     case SOL_RAW:
2090         switch (optname) {
2091         case ICMP_FILTER:
2092         case IPV6_CHECKSUM:
2093             /* these take a u32 value */
2094             if (optlen < sizeof(uint32_t)) {
2095                 return -TARGET_EINVAL;
2096             }
2097 
2098             if (get_user_u32(val, optval_addr)) {
2099                 return -TARGET_EFAULT;
2100             }
2101             ret = get_errno(setsockopt(sockfd, level, optname,
2102                                        &val, sizeof(val)));
2103             break;
2104 
2105         default:
2106             goto unimplemented;
2107         }
2108         break;
2109 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2110     case SOL_ALG:
2111         switch (optname) {
2112         case ALG_SET_KEY:
2113         {
2114             char *alg_key = g_malloc(optlen);
2115 
2116             if (!alg_key) {
2117                 return -TARGET_ENOMEM;
2118             }
2119             if (copy_from_user(alg_key, optval_addr, optlen)) {
2120                 g_free(alg_key);
2121                 return -TARGET_EFAULT;
2122             }
2123             ret = get_errno(setsockopt(sockfd, level, optname,
2124                                        alg_key, optlen));
2125             g_free(alg_key);
2126             break;
2127         }
2128         case ALG_SET_AEAD_AUTHSIZE:
2129         {
2130             ret = get_errno(setsockopt(sockfd, level, optname,
2131                                        NULL, optlen));
2132             break;
2133         }
2134         default:
2135             goto unimplemented;
2136         }
2137         break;
2138 #endif
2139     case TARGET_SOL_SOCKET:
2140         switch (optname) {
2141         case TARGET_SO_RCVTIMEO:
2142         {
2143                 struct timeval tv;
2144 
2145                 optname = SO_RCVTIMEO;
2146 
2147 set_timeout:
2148                 if (optlen != sizeof(struct target_timeval)) {
2149                     return -TARGET_EINVAL;
2150                 }
2151 
2152                 if (copy_from_user_timeval(&tv, optval_addr)) {
2153                     return -TARGET_EFAULT;
2154                 }
2155 
2156                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2157                                 &tv, sizeof(tv)));
2158                 return ret;
2159         }
2160         case TARGET_SO_SNDTIMEO:
2161                 optname = SO_SNDTIMEO;
2162                 goto set_timeout;
2163         case TARGET_SO_ATTACH_FILTER:
2164         {
2165                 struct target_sock_fprog *tfprog;
2166                 struct target_sock_filter *tfilter;
2167                 struct sock_fprog fprog;
2168                 struct sock_filter *filter;
2169                 int i;
2170 
2171                 if (optlen != sizeof(*tfprog)) {
2172                     return -TARGET_EINVAL;
2173                 }
2174                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2175                     return -TARGET_EFAULT;
2176                 }
2177                 if (!lock_user_struct(VERIFY_READ, tfilter,
2178                                       tswapal(tfprog->filter), 0)) {
2179                     unlock_user_struct(tfprog, optval_addr, 1);
2180                     return -TARGET_EFAULT;
2181                 }
2182 
2183                 fprog.len = tswap16(tfprog->len);
2184                 filter = g_try_new(struct sock_filter, fprog.len);
2185                 if (filter == NULL) {
2186                     unlock_user_struct(tfilter, tfprog->filter, 1);
2187                     unlock_user_struct(tfprog, optval_addr, 1);
2188                     return -TARGET_ENOMEM;
2189                 }
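                /* Convert each BPF instruction to host byte order; the
                 * single-byte jump offsets jt/jf need no swapping. */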
2190                 for (i = 0; i < fprog.len; i++) {
2191                     filter[i].code = tswap16(tfilter[i].code);
2192                     filter[i].jt = tfilter[i].jt;
2193                     filter[i].jf = tfilter[i].jf;
2194                     filter[i].k = tswap32(tfilter[i].k);
2195                 }
2196                 fprog.filter = filter;
2197 
2198                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2199                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2200                 g_free(filter);
2201 
2202                 unlock_user_struct(tfilter, tfprog->filter, 1);
2203                 unlock_user_struct(tfprog, optval_addr, 1);
2204                 return ret;
2205         }
2206         case TARGET_SO_BINDTODEVICE:
2207         {
2208                 char *dev_ifname, *addr_ifname;
2209 
2210                 if (optlen > IFNAMSIZ - 1) {
2211                     optlen = IFNAMSIZ - 1;
2212                 }
2213                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2214                 if (!dev_ifname) {
2215                     return -TARGET_EFAULT;
2216                 }
2217                 optname = SO_BINDTODEVICE;
2218                 addr_ifname = alloca(IFNAMSIZ);
2219                 memcpy(addr_ifname, dev_ifname, optlen);
2220                 addr_ifname[optlen] = 0;
2221                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2222                                            addr_ifname, optlen));
2223                 unlock_user(dev_ifname, optval_addr, 0);
2224                 return ret;
2225         }
2226         case TARGET_SO_LINGER:
2227         {
2228                 struct linger lg;
2229                 struct target_linger *tlg;
2230 
2231                 if (optlen != sizeof(struct target_linger)) {
2232                     return -TARGET_EINVAL;
2233                 }
2234                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2235                     return -TARGET_EFAULT;
2236                 }
2237                 __get_user(lg.l_onoff, &tlg->l_onoff);
2238                 __get_user(lg.l_linger, &tlg->l_linger);
2239                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2240                                 &lg, sizeof(lg)));
2241                 unlock_user_struct(tlg, optval_addr, 0);
2242                 return ret;
2243         }
2244             /* Options with 'int' argument.  */
2245         case TARGET_SO_DEBUG:
2246                 optname = SO_DEBUG;
2247                 break;
2248         case TARGET_SO_REUSEADDR:
2249                 optname = SO_REUSEADDR;
2250                 break;
2251 #ifdef SO_REUSEPORT
2252         case TARGET_SO_REUSEPORT:
2253                 optname = SO_REUSEPORT;
2254                 break;
2255 #endif
2256         case TARGET_SO_TYPE:
2257                 optname = SO_TYPE;
2258                 break;
2259         case TARGET_SO_ERROR:
2260                 optname = SO_ERROR;
2261                 break;
2262         case TARGET_SO_DONTROUTE:
2263                 optname = SO_DONTROUTE;
2264                 break;
2265         case TARGET_SO_BROADCAST:
2266                 optname = SO_BROADCAST;
2267                 break;
2268         case TARGET_SO_SNDBUF:
2269                 optname = SO_SNDBUF;
2270                 break;
2271         case TARGET_SO_SNDBUFFORCE:
2272                 optname = SO_SNDBUFFORCE;
2273                 break;
2274         case TARGET_SO_RCVBUF:
2275                 optname = SO_RCVBUF;
2276                 break;
2277         case TARGET_SO_RCVBUFFORCE:
2278                 optname = SO_RCVBUFFORCE;
2279                 break;
2280         case TARGET_SO_KEEPALIVE:
2281                 optname = SO_KEEPALIVE;
2282                 break;
2283         case TARGET_SO_OOBINLINE:
2284                 optname = SO_OOBINLINE;
2285                 break;
2286         case TARGET_SO_NO_CHECK:
2287                 optname = SO_NO_CHECK;
2288                 break;
2289         case TARGET_SO_PRIORITY:
2290                 optname = SO_PRIORITY;
2291                 break;
2292 #ifdef SO_BSDCOMPAT
2293         case TARGET_SO_BSDCOMPAT:
2294                 optname = SO_BSDCOMPAT;
2295                 break;
2296 #endif
2297         case TARGET_SO_PASSCRED:
2298                 optname = SO_PASSCRED;
2299                 break;
2300         case TARGET_SO_PASSSEC:
2301                 optname = SO_PASSSEC;
2302                 break;
2303         case TARGET_SO_TIMESTAMP:
2304                 optname = SO_TIMESTAMP;
2305                 break;
2306         case TARGET_SO_RCVLOWAT:
2307                 optname = SO_RCVLOWAT;
2308                 break;
2309         default:
2310             goto unimplemented;
2311         }
2312         if (optlen < sizeof(uint32_t))
2313             return -TARGET_EINVAL;
2314 
2315         if (get_user_u32(val, optval_addr))
2316             return -TARGET_EFAULT;
2317         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2318         break;
2319 #ifdef SOL_NETLINK
2320     case SOL_NETLINK:
2321         switch (optname) {
2322         case NETLINK_PKTINFO:
2323         case NETLINK_ADD_MEMBERSHIP:
2324         case NETLINK_DROP_MEMBERSHIP:
2325         case NETLINK_BROADCAST_ERROR:
2326         case NETLINK_NO_ENOBUFS:
2327 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2328         case NETLINK_LISTEN_ALL_NSID:
2329         case NETLINK_CAP_ACK:
2330 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2331 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2332         case NETLINK_EXT_ACK:
2333 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2334 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2335         case NETLINK_GET_STRICT_CHK:
2336 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2337             break;
2338         default:
2339             goto unimplemented;
2340         }
2341         val = 0;
2342         if (optlen < sizeof(uint32_t)) {
2343             return -TARGET_EINVAL;
2344         }
2345         if (get_user_u32(val, optval_addr)) {
2346             return -TARGET_EFAULT;
2347         }
2348         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2349                                    sizeof(val)));
2350         break;
2351 #endif /* SOL_NETLINK */
2352     default:
2353     unimplemented:
2354         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2355                       level, optname);
2356         ret = -TARGET_ENOPROTOOPT;
2357     }
2358     return ret;
2359 }
2360 
2361 /* do_getsockopt() Must return target values and target errnos. */
2362 static abi_long do_getsockopt(int sockfd, int level, int optname,
2363                               abi_ulong optval_addr, abi_ulong optlen)
2364 {
2365     abi_long ret;
2366     int len, val;
2367     socklen_t lv;
2368 
2369     switch(level) {
2370     case TARGET_SOL_SOCKET:
2371         level = SOL_SOCKET;
2372         switch (optname) {
2373         /* These don't just return a single integer */
2374         case TARGET_SO_PEERNAME:
2375             goto unimplemented;
2376         case TARGET_SO_RCVTIMEO: {
2377             struct timeval tv;
2378             socklen_t tvlen;
2379 
2380             optname = SO_RCVTIMEO;
2381 
2382 get_timeout:
2383             if (get_user_u32(len, optlen)) {
2384                 return -TARGET_EFAULT;
2385             }
2386             if (len < 0) {
2387                 return -TARGET_EINVAL;
2388             }
2389 
2390             tvlen = sizeof(tv);
2391             ret = get_errno(getsockopt(sockfd, level, optname,
2392                                        &tv, &tvlen));
2393             if (ret < 0) {
2394                 return ret;
2395             }
2396             if (len > sizeof(struct target_timeval)) {
2397                 len = sizeof(struct target_timeval);
2398             }
2399             if (copy_to_user_timeval(optval_addr, &tv)) {
2400                 return -TARGET_EFAULT;
2401             }
2402             if (put_user_u32(len, optlen)) {
2403                 return -TARGET_EFAULT;
2404             }
2405             break;
2406         }
2407         case TARGET_SO_SNDTIMEO:
2408             optname = SO_SNDTIMEO;
2409             goto get_timeout;
2410         case TARGET_SO_PEERCRED: {
2411             struct ucred cr;
2412             socklen_t crlen;
2413             struct target_ucred *tcr;
2414 
2415             if (get_user_u32(len, optlen)) {
2416                 return -TARGET_EFAULT;
2417             }
2418             if (len < 0) {
2419                 return -TARGET_EINVAL;
2420             }
2421 
2422             crlen = sizeof(cr);
2423             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2424                                        &cr, &crlen));
2425             if (ret < 0) {
2426                 return ret;
2427             }
2428             if (len > crlen) {
2429                 len = crlen;
2430             }
2431             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2432                 return -TARGET_EFAULT;
2433             }
2434             __put_user(cr.pid, &tcr->pid);
2435             __put_user(cr.uid, &tcr->uid);
2436             __put_user(cr.gid, &tcr->gid);
2437             unlock_user_struct(tcr, optval_addr, 1);
2438             if (put_user_u32(len, optlen)) {
2439                 return -TARGET_EFAULT;
2440             }
2441             break;
2442         }
2443         case TARGET_SO_PEERSEC: {
2444             char *name;
2445 
2446             if (get_user_u32(len, optlen)) {
2447                 return -TARGET_EFAULT;
2448             }
2449             if (len < 0) {
2450                 return -TARGET_EINVAL;
2451             }
2452             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2453             if (!name) {
2454                 return -TARGET_EFAULT;
2455             }
2456             lv = len;
2457             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2458                                        name, &lv));
2459             if (put_user_u32(lv, optlen)) {
2460                 ret = -TARGET_EFAULT;
2461             }
2462             unlock_user(name, optval_addr, lv);
2463             break;
2464         }
2465         case TARGET_SO_LINGER:
2466         {
2467             struct linger lg;
2468             socklen_t lglen;
2469             struct target_linger *tlg;
2470 
2471             if (get_user_u32(len, optlen)) {
2472                 return -TARGET_EFAULT;
2473             }
2474             if (len < 0) {
2475                 return -TARGET_EINVAL;
2476             }
2477 
2478             lglen = sizeof(lg);
2479             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2480                                        &lg, &lglen));
2481             if (ret < 0) {
2482                 return ret;
2483             }
2484             if (len > lglen) {
2485                 len = lglen;
2486             }
2487             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2488                 return -TARGET_EFAULT;
2489             }
2490             __put_user(lg.l_onoff, &tlg->l_onoff);
2491             __put_user(lg.l_linger, &tlg->l_linger);
2492             unlock_user_struct(tlg, optval_addr, 1);
2493             if (put_user_u32(len, optlen)) {
2494                 return -TARGET_EFAULT;
2495             }
2496             break;
2497         }
2498         /* Options with 'int' argument.  */
2499         case TARGET_SO_DEBUG:
2500             optname = SO_DEBUG;
2501             goto int_case;
2502         case TARGET_SO_REUSEADDR:
2503             optname = SO_REUSEADDR;
2504             goto int_case;
2505 #ifdef SO_REUSEPORT
2506         case TARGET_SO_REUSEPORT:
2507             optname = SO_REUSEPORT;
2508             goto int_case;
2509 #endif
2510         case TARGET_SO_TYPE:
2511             optname = SO_TYPE;
2512             goto int_case;
2513         case TARGET_SO_ERROR:
2514             optname = SO_ERROR;
2515             goto int_case;
2516         case TARGET_SO_DONTROUTE:
2517             optname = SO_DONTROUTE;
2518             goto int_case;
2519         case TARGET_SO_BROADCAST:
2520             optname = SO_BROADCAST;
2521             goto int_case;
2522         case TARGET_SO_SNDBUF:
2523             optname = SO_SNDBUF;
2524             goto int_case;
2525         case TARGET_SO_RCVBUF:
2526             optname = SO_RCVBUF;
2527             goto int_case;
2528         case TARGET_SO_KEEPALIVE:
2529             optname = SO_KEEPALIVE;
2530             goto int_case;
2531         case TARGET_SO_OOBINLINE:
2532             optname = SO_OOBINLINE;
2533             goto int_case;
2534         case TARGET_SO_NO_CHECK:
2535             optname = SO_NO_CHECK;
2536             goto int_case;
2537         case TARGET_SO_PRIORITY:
2538             optname = SO_PRIORITY;
2539             goto int_case;
2540 #ifdef SO_BSDCOMPAT
2541         case TARGET_SO_BSDCOMPAT:
2542             optname = SO_BSDCOMPAT;
2543             goto int_case;
2544 #endif
2545         case TARGET_SO_PASSCRED:
2546             optname = SO_PASSCRED;
2547             goto int_case;
2548         case TARGET_SO_TIMESTAMP:
2549             optname = SO_TIMESTAMP;
2550             goto int_case;
2551         case TARGET_SO_RCVLOWAT:
2552             optname = SO_RCVLOWAT;
2553             goto int_case;
2554         case TARGET_SO_ACCEPTCONN:
2555             optname = SO_ACCEPTCONN;
2556             goto int_case;
2557         default:
2558             goto int_case;
2559         }
2560         break;
2561     case SOL_TCP:
2562         /* TCP options all take an 'int' value.  */
2563     int_case:
2564         if (get_user_u32(len, optlen))
2565             return -TARGET_EFAULT;
2566         if (len < 0)
2567             return -TARGET_EINVAL;
2568         lv = sizeof(lv);
2569         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2570         if (ret < 0)
2571             return ret;
2572         if (optname == SO_TYPE) {
2573             val = host_to_target_sock_type(val);
2574         }
2575         if (len > lv)
2576             len = lv;
2577         if (len == 4) {
2578             if (put_user_u32(val, optval_addr))
2579                 return -TARGET_EFAULT;
2580         } else {
2581             if (put_user_u8(val, optval_addr))
2582                 return -TARGET_EFAULT;
2583         }
2584         if (put_user_u32(len, optlen))
2585             return -TARGET_EFAULT;
2586         break;
2587     case SOL_IP:
2588         switch(optname) {
2589         case IP_TOS:
2590         case IP_TTL:
2591         case IP_HDRINCL:
2592         case IP_ROUTER_ALERT:
2593         case IP_RECVOPTS:
2594         case IP_RETOPTS:
2595         case IP_PKTINFO:
2596         case IP_MTU_DISCOVER:
2597         case IP_RECVERR:
2598         case IP_RECVTOS:
2599 #ifdef IP_FREEBIND
2600         case IP_FREEBIND:
2601 #endif
2602         case IP_MULTICAST_TTL:
2603         case IP_MULTICAST_LOOP:
2604             if (get_user_u32(len, optlen))
2605                 return -TARGET_EFAULT;
2606             if (len < 0)
2607                 return -TARGET_EINVAL;
2608             lv = sizeof(lv);
2609             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2610             if (ret < 0)
2611                 return ret;
2612             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2613                 len = 1;
2614                 if (put_user_u32(len, optlen)
2615                     || put_user_u8(val, optval_addr))
2616                     return -TARGET_EFAULT;
2617             } else {
2618                 if (len > sizeof(int))
2619                     len = sizeof(int);
2620                 if (put_user_u32(len, optlen)
2621                     || put_user_u32(val, optval_addr))
2622                     return -TARGET_EFAULT;
2623             }
2624             break;
2625         default:
2626             ret = -TARGET_ENOPROTOOPT;
2627             break;
2628         }
2629         break;
2630     case SOL_IPV6:
2631         switch (optname) {
2632         case IPV6_MTU_DISCOVER:
2633         case IPV6_MTU:
2634         case IPV6_V6ONLY:
2635         case IPV6_RECVPKTINFO:
2636         case IPV6_UNICAST_HOPS:
2637         case IPV6_MULTICAST_HOPS:
2638         case IPV6_MULTICAST_LOOP:
2639         case IPV6_RECVERR:
2640         case IPV6_RECVHOPLIMIT:
2641         case IPV6_2292HOPLIMIT:
2642         case IPV6_CHECKSUM:
2643         case IPV6_ADDRFORM:
2644         case IPV6_2292PKTINFO:
2645         case IPV6_RECVTCLASS:
2646         case IPV6_RECVRTHDR:
2647         case IPV6_2292RTHDR:
2648         case IPV6_RECVHOPOPTS:
2649         case IPV6_2292HOPOPTS:
2650         case IPV6_RECVDSTOPTS:
2651         case IPV6_2292DSTOPTS:
2652         case IPV6_TCLASS:
2653 #ifdef IPV6_RECVPATHMTU
2654         case IPV6_RECVPATHMTU:
2655 #endif
2656 #ifdef IPV6_TRANSPARENT
2657         case IPV6_TRANSPARENT:
2658 #endif
2659 #ifdef IPV6_FREEBIND
2660         case IPV6_FREEBIND:
2661 #endif
2662 #ifdef IPV6_RECVORIGDSTADDR
2663         case IPV6_RECVORIGDSTADDR:
2664 #endif
2665             if (get_user_u32(len, optlen))
2666                 return -TARGET_EFAULT;
2667             if (len < 0)
2668                 return -TARGET_EINVAL;
2669             lv = sizeof(lv);
2670             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2671             if (ret < 0)
2672                 return ret;
2673             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2674                 len = 1;
2675                 if (put_user_u32(len, optlen)
2676                     || put_user_u8(val, optval_addr))
2677                     return -TARGET_EFAULT;
2678             } else {
2679                 if (len > sizeof(int))
2680                     len = sizeof(int);
2681                 if (put_user_u32(len, optlen)
2682                     || put_user_u32(val, optval_addr))
2683                     return -TARGET_EFAULT;
2684             }
2685             break;
2686         default:
2687             ret = -TARGET_ENOPROTOOPT;
2688             break;
2689         }
2690         break;
2691 #ifdef SOL_NETLINK
2692     case SOL_NETLINK:
2693         switch (optname) {
2694         case NETLINK_PKTINFO:
2695         case NETLINK_BROADCAST_ERROR:
2696         case NETLINK_NO_ENOBUFS:
2697 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2698         case NETLINK_LISTEN_ALL_NSID:
2699         case NETLINK_CAP_ACK:
2700 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2701 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2702         case NETLINK_EXT_ACK:
2703 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2704 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2705         case NETLINK_GET_STRICT_CHK:
2706 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2707             if (get_user_u32(len, optlen)) {
2708                 return -TARGET_EFAULT;
2709             }
2710             if (len != sizeof(val)) {
2711                 return -TARGET_EINVAL;
2712             }
2713             lv = len;
2714             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2715             if (ret < 0) {
2716                 return ret;
2717             }
2718             if (put_user_u32(lv, optlen)
2719                 || put_user_u32(val, optval_addr)) {
2720                 return -TARGET_EFAULT;
2721             }
2722             break;
2723 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2724         case NETLINK_LIST_MEMBERSHIPS:
2725         {
2726             uint32_t *results;
2727             int i;
2728             if (get_user_u32(len, optlen)) {
2729                 return -TARGET_EFAULT;
2730             }
2731             if (len < 0) {
2732                 return -TARGET_EINVAL;
2733             }
2734             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2735             if (!results) {
2736                 return -TARGET_EFAULT;
2737             }
2738             lv = len;
2739             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2740             if (ret < 0) {
2741                 unlock_user(results, optval_addr, 0);
2742                 return ret;
2743             }
2744             /* swap host endianess to target endianess. */
2745             /* swap host endianness to target endianness. */
2746                 results[i] = tswap32(results[i]);
2747             }
2748             if (put_user_u32(lv, optlen)) {
2749                 return -TARGET_EFAULT;
2750             }
2751             unlock_user(results, optval_addr, 0);
2752             break;
2753         }
2754 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2755         default:
2756             goto unimplemented;
2757         }
2758         break;
2759 #endif /* SOL_NETLINK */
2760     default:
2761     unimplemented:
2762         qemu_log_mask(LOG_UNIMP,
2763                       "getsockopt level=%d optname=%d not yet supported\n",
2764                       level, optname);
2765         ret = -TARGET_EOPNOTSUPP;
2766         break;
2767     }
2768     return ret;
2769 }
2770 
2771 /* Convert target low/high pair representing file offset into the host
2772  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2773  * as the kernel doesn't handle them either.
2774  */
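/* Illustration only: for a 32-bit target on a 64-bit host, tlow = 0x1000 and
 * thigh = 0x2 combine to off = 0x200001000, so *hlow receives the whole
 * value and *hhigh ends up 0.
 */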
2775 static void target_to_host_low_high(abi_ulong tlow,
2776                                     abi_ulong thigh,
2777                                     unsigned long *hlow,
2778                                     unsigned long *hhigh)
2779 {
2780     uint64_t off = tlow |
2781         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2782         TARGET_LONG_BITS / 2;
2783 
2784     *hlow = off;
2785     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2786 }
2787 
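/* Lock a guest iovec array into host memory and return a matching host
 * iovec array. A bad address in the first entry fails with EFAULT; bad
 * addresses in later entries only truncate the transfer (see the comment
 * in the loop below). Returns NULL with errno set on failure, and also
 * for count == 0 (with errno cleared).
 */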
2788 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2789                                 abi_ulong count, int copy)
2790 {
2791     struct target_iovec *target_vec;
2792     struct iovec *vec;
2793     abi_ulong total_len, max_len;
2794     int i;
2795     int err = 0;
2796     bool bad_address = false;
2797 
2798     if (count == 0) {
2799         errno = 0;
2800         return NULL;
2801     }
2802     if (count > IOV_MAX) {
2803         errno = EINVAL;
2804         return NULL;
2805     }
2806 
2807     vec = g_try_new0(struct iovec, count);
2808     if (vec == NULL) {
2809         errno = ENOMEM;
2810         return NULL;
2811     }
2812 
2813     target_vec = lock_user(VERIFY_READ, target_addr,
2814                            count * sizeof(struct target_iovec), 1);
2815     if (target_vec == NULL) {
2816         err = EFAULT;
2817         goto fail2;
2818     }
2819 
2820     /* ??? If host page size > target page size, this will result in a
2821        value larger than what we can actually support.  */
2822     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2823     total_len = 0;
2824 
2825     for (i = 0; i < count; i++) {
2826         abi_ulong base = tswapal(target_vec[i].iov_base);
2827         abi_long len = tswapal(target_vec[i].iov_len);
2828 
2829         if (len < 0) {
2830             err = EINVAL;
2831             goto fail;
2832         } else if (len == 0) {
2833             /* Zero length pointer is ignored.  */
2834             vec[i].iov_base = 0;
2835         } else {
2836             vec[i].iov_base = lock_user(type, base, len, copy);
2837             /* If the first buffer pointer is bad, this is a fault.  But
2838              * subsequent bad buffers will result in a partial write; this
2839              * is realized by filling the vector with null pointers and
2840              * zero lengths. */
2841             if (!vec[i].iov_base) {
2842                 if (i == 0) {
2843                     err = EFAULT;
2844                     goto fail;
2845                 } else {
2846                     bad_address = true;
2847                 }
2848             }
2849             if (bad_address) {
2850                 len = 0;
2851             }
2852             if (len > max_len - total_len) {
2853                 len = max_len - total_len;
2854             }
2855         }
2856         vec[i].iov_len = len;
2857         total_len += len;
2858     }
2859 
2860     unlock_user(target_vec, target_addr, 0);
2861     return vec;
2862 
2863  fail:
2864     while (--i >= 0) {
2865         if (tswapal(target_vec[i].iov_len) > 0) {
2866             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2867         }
2868     }
2869     unlock_user(target_vec, target_addr, 0);
2870  fail2:
2871     g_free(vec);
2872     errno = err;
2873     return NULL;
2874 }
2875 
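/* Release an iovec array obtained from lock_iovec(), copying data back to
 * guest memory when 'copy' is set, and free the host array.
 */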
2876 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2877                          abi_ulong count, int copy)
2878 {
2879     struct target_iovec *target_vec;
2880     int i;
2881 
2882     target_vec = lock_user(VERIFY_READ, target_addr,
2883                            count * sizeof(struct target_iovec), 1);
2884     if (target_vec) {
2885         for (i = 0; i < count; i++) {
2886             abi_ulong base = tswapal(target_vec[i].iov_base);
2887             abi_long len = tswapal(target_vec[i].iov_len);
2888             if (len < 0) {
2889                 break;
2890             }
2891             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2892         }
2893         unlock_user(target_vec, target_addr, 0);
2894     }
2895 
2896     g_free(vec);
2897 }
2898 
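/* Translate a guest socket type (including the SOCK_CLOEXEC/SOCK_NONBLOCK
 * flags) into the host value in place; returns -TARGET_EINVAL if a flag
 * cannot be represented on this host.
 */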
2899 static inline int target_to_host_sock_type(int *type)
2900 {
2901     int host_type = 0;
2902     int target_type = *type;
2903 
2904     switch (target_type & TARGET_SOCK_TYPE_MASK) {
2905     case TARGET_SOCK_DGRAM:
2906         host_type = SOCK_DGRAM;
2907         break;
2908     case TARGET_SOCK_STREAM:
2909         host_type = SOCK_STREAM;
2910         break;
2911     default:
2912         host_type = target_type & TARGET_SOCK_TYPE_MASK;
2913         break;
2914     }
2915     if (target_type & TARGET_SOCK_CLOEXEC) {
2916 #if defined(SOCK_CLOEXEC)
2917         host_type |= SOCK_CLOEXEC;
2918 #else
2919         return -TARGET_EINVAL;
2920 #endif
2921     }
2922     if (target_type & TARGET_SOCK_NONBLOCK) {
2923 #if defined(SOCK_NONBLOCK)
2924         host_type |= SOCK_NONBLOCK;
2925 #elif !defined(O_NONBLOCK)
2926         return -TARGET_EINVAL;
2927 #endif
2928     }
2929     *type = host_type;
2930     return 0;
2931 }
2932 
2933 /* Try to emulate socket type flags after socket creation.  */
2934 static int sock_flags_fixup(int fd, int target_type)
2935 {
2936 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2937     if (target_type & TARGET_SOCK_NONBLOCK) {
2938         int flags = fcntl(fd, F_GETFL);
2939         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2940             close(fd);
2941             return -TARGET_EINVAL;
2942         }
2943     }
2944 #endif
2945     return fd;
2946 }
2947 
2948 /* do_socket() Must return target values and target errnos. */
2949 static abi_long do_socket(int domain, int type, int protocol)
2950 {
2951     int target_type = type;
2952     int ret;
2953 
2954     ret = target_to_host_sock_type(&type);
2955     if (ret) {
2956         return ret;
2957     }
2958 
2959     if (domain == PF_NETLINK && !(
2960 #ifdef CONFIG_RTNETLINK
2961          protocol == NETLINK_ROUTE ||
2962 #endif
2963          protocol == NETLINK_KOBJECT_UEVENT ||
2964          protocol == NETLINK_AUDIT)) {
2965         return -TARGET_EPROTONOSUPPORT;
2966     }
2967 
2968     if (domain == AF_PACKET ||
2969         (domain == AF_INET && type == SOCK_PACKET)) {
2970         protocol = tswap16(protocol);
2971     }
2972 
2973     ret = get_errno(socket(domain, type, protocol));
2974     if (ret >= 0) {
2975         ret = sock_flags_fixup(ret, target_type);
2976         if (type == SOCK_PACKET) {
2977             /* Handle an obsolete case:
2978              * if the socket type is SOCK_PACKET, it is bound by name
2979              */
2980             fd_trans_register(ret, &target_packet_trans);
2981         } else if (domain == PF_NETLINK) {
2982             switch (protocol) {
2983 #ifdef CONFIG_RTNETLINK
2984             case NETLINK_ROUTE:
2985                 fd_trans_register(ret, &target_netlink_route_trans);
2986                 break;
2987 #endif
2988             case NETLINK_KOBJECT_UEVENT:
2989                 /* nothing to do: messages are strings */
2990                 break;
2991             case NETLINK_AUDIT:
2992                 fd_trans_register(ret, &target_netlink_audit_trans);
2993                 break;
2994             default:
2995                 g_assert_not_reached();
2996             }
2997         }
2998     }
2999     return ret;
3000 }
3001 
3002 /* do_bind() Must return target values and target errnos. */
3003 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3004                         socklen_t addrlen)
3005 {
3006     void *addr;
3007     abi_long ret;
3008 
3009     if ((int)addrlen < 0) {
3010         return -TARGET_EINVAL;
3011     }
3012 
3013     addr = alloca(addrlen+1);
3014 
3015     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3016     if (ret)
3017         return ret;
3018 
3019     return get_errno(bind(sockfd, addr, addrlen));
3020 }
3021 
3022 /* do_connect() Must return target values and target errnos. */
3023 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3024                            socklen_t addrlen)
3025 {
3026     void *addr;
3027     abi_long ret;
3028 
3029     if ((int)addrlen < 0) {
3030         return -TARGET_EINVAL;
3031     }
3032 
3033     addr = alloca(addrlen+1);
3034 
3035     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3036     if (ret)
3037         return ret;
3038 
3039     return get_errno(safe_connect(sockfd, addr, addrlen));
3040 }
3041 
3042 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3043 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3044                                       int flags, int send)
3045 {
3046     abi_long ret, len;
3047     struct msghdr msg;
3048     abi_ulong count;
3049     struct iovec *vec;
3050     abi_ulong target_vec;
3051 
3052     if (msgp->msg_name) {
3053         msg.msg_namelen = tswap32(msgp->msg_namelen);
3054         msg.msg_name = alloca(msg.msg_namelen+1);
3055         ret = target_to_host_sockaddr(fd, msg.msg_name,
3056                                       tswapal(msgp->msg_name),
3057                                       msg.msg_namelen);
3058         if (ret == -TARGET_EFAULT) {
3059             /* For connected sockets msg_name and msg_namelen must
3060              * be ignored, so returning EFAULT immediately is wrong.
3061              * Instead, pass a bad msg_name to the host kernel, and
3062              * let it decide whether to return EFAULT or not.
3063              */
3064             msg.msg_name = (void *)-1;
3065         } else if (ret) {
3066             goto out2;
3067         }
3068     } else {
3069         msg.msg_name = NULL;
3070         msg.msg_namelen = 0;
3071     }
3072     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3073     msg.msg_control = alloca(msg.msg_controllen);
3074     memset(msg.msg_control, 0, msg.msg_controllen);
3075 
3076     msg.msg_flags = tswap32(msgp->msg_flags);
3077 
3078     count = tswapal(msgp->msg_iovlen);
3079     target_vec = tswapal(msgp->msg_iov);
3080 
3081     if (count > IOV_MAX) {
3082         /* sendmsg/recvmsg return a different errno for this condition than
3083          * readv/writev, so we must catch it here before lock_iovec() does.
3084          */
3085         ret = -TARGET_EMSGSIZE;
3086         goto out2;
3087     }
3088 
3089     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3090                      target_vec, count, send);
3091     if (vec == NULL) {
3092         ret = -host_to_target_errno(errno);
3093         goto out2;
3094     }
3095     msg.msg_iovlen = count;
3096     msg.msg_iov = vec;
3097 
3098     if (send) {
3099         if (fd_trans_target_to_host_data(fd)) {
3100             void *host_msg;
3101 
3102             host_msg = g_malloc(msg.msg_iov->iov_len);
3103             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3104             ret = fd_trans_target_to_host_data(fd)(host_msg,
3105                                                    msg.msg_iov->iov_len);
3106             if (ret >= 0) {
3107                 msg.msg_iov->iov_base = host_msg;
3108                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3109             }
3110             g_free(host_msg);
3111         } else {
3112             ret = target_to_host_cmsg(&msg, msgp);
3113             if (ret == 0) {
3114                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3115             }
3116         }
3117     } else {
3118         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3119         if (!is_error(ret)) {
3120             len = ret;
3121             if (fd_trans_host_to_target_data(fd)) {
3122                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3123                                                MIN(msg.msg_iov->iov_len, len));
3124             } else {
3125                 ret = host_to_target_cmsg(msgp, &msg);
3126             }
3127             if (!is_error(ret)) {
3128                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3129                 msgp->msg_flags = tswap32(msg.msg_flags);
3130                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3131                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3132                                     msg.msg_name, msg.msg_namelen);
3133                     if (ret) {
3134                         goto out;
3135                     }
3136                 }
3137 
3138                 ret = len;
3139             }
3140         }
3141     }
3142 
3143 out:
3144     unlock_iovec(vec, target_vec, count, !send);
3145 out2:
3146     return ret;
3147 }
3148 
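/* do_sendrecvmsg() must return target values and target errnos. */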
3149 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3150                                int flags, int send)
3151 {
3152     abi_long ret;
3153     struct target_msghdr *msgp;
3154 
3155     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3156                           msgp,
3157                           target_msg,
3158                           send ? 1 : 0)) {
3159         return -TARGET_EFAULT;
3160     }
3161     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3162     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3163     return ret;
3164 }
3165 
3166 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3167  * so it might not have this *mmsg-specific flag either.
3168  */
3169 #ifndef MSG_WAITFORONE
3170 #define MSG_WAITFORONE 0x10000
3171 #endif
3172 
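/* do_sendrecvmmsg() must return target values and target errnos.
 * It implements sendmmsg/recvmmsg by applying do_sendrecvmsg_locked()
 * to each entry of the locked target mmsghdr vector.
 */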
3173 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3174                                 unsigned int vlen, unsigned int flags,
3175                                 int send)
3176 {
3177     struct target_mmsghdr *mmsgp;
3178     abi_long ret = 0;
3179     int i;
3180 
3181     if (vlen > UIO_MAXIOV) {
3182         vlen = UIO_MAXIOV;
3183     }
3184 
3185     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3186     if (!mmsgp) {
3187         return -TARGET_EFAULT;
3188     }
3189 
3190     for (i = 0; i < vlen; i++) {
3191         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3192         if (is_error(ret)) {
3193             break;
3194         }
3195         mmsgp[i].msg_len = tswap32(ret);
3196         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3197         if (flags & MSG_WAITFORONE) {
3198             flags |= MSG_DONTWAIT;
3199         }
3200     }
3201 
3202     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3203 
3204     /* Return the number of datagrams sent or received if we handled
3205      * any at all; otherwise return the error.
3206      */
3207     if (i) {
3208         return i;
3209     }
3210     return ret;
3211 }
3212 
3213 /* do_accept4() must return target values and target errnos. */
3214 static abi_long do_accept4(int fd, abi_ulong target_addr,
3215                            abi_ulong target_addrlen_addr, int flags)
3216 {
3217     socklen_t addrlen, ret_addrlen;
3218     void *addr;
3219     abi_long ret;
3220     int host_flags;
3221 
3222     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3223 
3224     if (target_addr == 0) {
3225         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3226     }
3227 
3228     /* linux returns EINVAL if addrlen pointer is invalid */
3229     if (get_user_u32(addrlen, target_addrlen_addr))
3230         return -TARGET_EINVAL;
3231 
3232     if ((int)addrlen < 0) {
3233         return -TARGET_EINVAL;
3234     }
3235 
3236     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3237         return -TARGET_EINVAL;
3238 
3239     addr = alloca(addrlen);
3240 
3241     ret_addrlen = addrlen;
3242     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3243     if (!is_error(ret)) {
3244         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3245         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3246             ret = -TARGET_EFAULT;
3247         }
3248     }
3249     return ret;
3250 }
3251 
3252 /* do_getpeername() must return target values and target errnos. */
3253 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3254                                abi_ulong target_addrlen_addr)
3255 {
3256     socklen_t addrlen, ret_addrlen;
3257     void *addr;
3258     abi_long ret;
3259 
3260     if (get_user_u32(addrlen, target_addrlen_addr))
3261         return -TARGET_EFAULT;
3262 
3263     if ((int)addrlen < 0) {
3264         return -TARGET_EINVAL;
3265     }
3266 
3267     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3268         return -TARGET_EFAULT;
3269 
3270     addr = alloca(addrlen);
3271 
3272     ret_addrlen = addrlen;
3273     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3274     if (!is_error(ret)) {
3275         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3276         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3277             ret = -TARGET_EFAULT;
3278         }
3279     }
3280     return ret;
3281 }
3282 
3283 /* do_getsockname() must return target values and target errnos. */
3284 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3285                                abi_ulong target_addrlen_addr)
3286 {
3287     socklen_t addrlen, ret_addrlen;
3288     void *addr;
3289     abi_long ret;
3290 
3291     if (get_user_u32(addrlen, target_addrlen_addr))
3292         return -TARGET_EFAULT;
3293 
3294     if ((int)addrlen < 0) {
3295         return -TARGET_EINVAL;
3296     }
3297 
3298     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3299         return -TARGET_EFAULT;
3300 
3301     addr = alloca(addrlen);
3302 
3303     ret_addrlen = addrlen;
3304     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3305     if (!is_error(ret)) {
3306         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3307         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3308             ret = -TARGET_EFAULT;
3309         }
3310     }
3311     return ret;
3312 }
3313 
3314 /* do_socketpair() must return target values and target errnos. */
3315 static abi_long do_socketpair(int domain, int type, int protocol,
3316                               abi_ulong target_tab_addr)
3317 {
3318     int tab[2];
3319     abi_long ret;
3320 
3321     target_to_host_sock_type(&type);
3322 
3323     ret = get_errno(socketpair(domain, type, protocol, tab));
3324     if (!is_error(ret)) {
3325         if (put_user_s32(tab[0], target_tab_addr)
3326             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3327             ret = -TARGET_EFAULT;
3328     }
3329     return ret;
3330 }
3331 
3332 /* do_sendto() must return target values and target errnos. */
3333 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3334                           abi_ulong target_addr, socklen_t addrlen)
3335 {
3336     void *addr;
3337     void *host_msg;
3338     void *copy_msg = NULL;
3339     abi_long ret;
3340 
3341     if ((int)addrlen < 0) {
3342         return -TARGET_EINVAL;
3343     }
3344 
3345     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3346     if (!host_msg)
3347         return -TARGET_EFAULT;
3348     if (fd_trans_target_to_host_data(fd)) {
3349         copy_msg = host_msg;
3350         host_msg = g_malloc(len);
3351         memcpy(host_msg, copy_msg, len);
3352         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3353         if (ret < 0) {
3354             goto fail;
3355         }
3356     }
3357     if (target_addr) {
3358         addr = alloca(addrlen+1);
3359         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3360         if (ret) {
3361             goto fail;
3362         }
3363         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3364     } else {
3365         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3366     }
3367 fail:
3368     if (copy_msg) {
3369         g_free(host_msg);
3370         host_msg = copy_msg;
3371     }
3372     unlock_user(host_msg, msg, 0);
3373     return ret;
3374 }
3375 
3376 /* do_recvfrom() must return target values and target errnos. */
3377 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3378                             abi_ulong target_addr,
3379                             abi_ulong target_addrlen)
3380 {
3381     socklen_t addrlen, ret_addrlen;
3382     void *addr;
3383     void *host_msg;
3384     abi_long ret;
3385 
3386     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3387     if (!host_msg)
3388         return -TARGET_EFAULT;
3389     if (target_addr) {
3390         if (get_user_u32(addrlen, target_addrlen)) {
3391             ret = -TARGET_EFAULT;
3392             goto fail;
3393         }
3394         if ((int)addrlen < 0) {
3395             ret = -TARGET_EINVAL;
3396             goto fail;
3397         }
3398         addr = alloca(addrlen);
3399         ret_addrlen = addrlen;
3400         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3401                                       addr, &ret_addrlen));
3402     } else {
3403         addr = NULL; /* To keep compiler quiet.  */
3404         addrlen = 0; /* To keep compiler quiet.  */
3405         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3406     }
3407     if (!is_error(ret)) {
3408         if (fd_trans_host_to_target_data(fd)) {
3409             abi_long trans;
3410             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3411             if (is_error(trans)) {
3412                 ret = trans;
3413                 goto fail;
3414             }
3415         }
3416         if (target_addr) {
3417             host_to_target_sockaddr(target_addr, addr,
3418                                     MIN(addrlen, ret_addrlen));
3419             if (put_user_u32(ret_addrlen, target_addrlen)) {
3420                 ret = -TARGET_EFAULT;
3421                 goto fail;
3422             }
3423         }
3424         unlock_user(host_msg, msg, len);
3425     } else {
3426 fail:
3427         unlock_user(host_msg, msg, 0);
3428     }
3429     return ret;
3430 }
3431 
3432 #ifdef TARGET_NR_socketcall
3433 /* do_socketcall() must return target values and target errnos. */
3434 static abi_long do_socketcall(int num, abi_ulong vptr)
3435 {
3436     static const unsigned nargs[] = { /* number of arguments per operation */
3437         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3438         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3439         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3440         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3441         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3442         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3443         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3444         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3445         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3446         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3447         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3448         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3449         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3450         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3451         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3452         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3453         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3454         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3455         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3456         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3457     };
3458     abi_long a[6]; /* max 6 args */
3459     unsigned i;
3460 
3461     /* check the range of the first argument num */
3462     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3463     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3464         return -TARGET_EINVAL;
3465     }
3466     /* ensure we have space for args */
3467     if (nargs[num] > ARRAY_SIZE(a)) {
3468         return -TARGET_EINVAL;
3469     }
3470     /* collect the arguments in a[] according to nargs[] */
3471     for (i = 0; i < nargs[num]; ++i) {
3472         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3473             return -TARGET_EFAULT;
3474         }
3475     }
3476     /* now when we have the args, invoke the appropriate underlying function */
3477     switch (num) {
3478     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3479         return do_socket(a[0], a[1], a[2]);
3480     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3481         return do_bind(a[0], a[1], a[2]);
3482     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3483         return do_connect(a[0], a[1], a[2]);
3484     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3485         return get_errno(listen(a[0], a[1]));
3486     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3487         return do_accept4(a[0], a[1], a[2], 0);
3488     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3489         return do_getsockname(a[0], a[1], a[2]);
3490     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3491         return do_getpeername(a[0], a[1], a[2]);
3492     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3493         return do_socketpair(a[0], a[1], a[2], a[3]);
3494     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3495         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3496     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3497         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3498     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3499         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3500     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3501         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3502     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3503         return get_errno(shutdown(a[0], a[1]));
3504     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3505         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3506     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3507         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3508     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3509         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3510     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3511         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3512     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3513         return do_accept4(a[0], a[1], a[2], a[3]);
3514     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3515         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3516     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3517         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3518     default:
3519         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3520         return -TARGET_EINVAL;
3521     }
3522 }
3523 #endif
3524 
3525 #define N_SHM_REGIONS	32
3526 
3527 static struct shm_region {
3528     abi_ulong start;
3529     abi_ulong size;
3530     bool in_use;
3531 } shm_regions[N_SHM_REGIONS];
3532 
3533 #ifndef TARGET_SEMID64_DS
3534 /* asm-generic version of this struct */
3535 struct target_semid64_ds
3536 {
3537   struct target_ipc_perm sem_perm;
3538   abi_ulong sem_otime;
3539 #if TARGET_ABI_BITS == 32
3540   abi_ulong __unused1;
3541 #endif
3542   abi_ulong sem_ctime;
3543 #if TARGET_ABI_BITS == 32
3544   abi_ulong __unused2;
3545 #endif
3546   abi_ulong sem_nsems;
3547   abi_ulong __unused3;
3548   abi_ulong __unused4;
3549 };
3550 #endif
3551 
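/* Converters for struct ipc_perm. The target copy sits at the start of the
 * containing semid64_ds/msqid_ds/shmid_ds, which is why both helpers lock a
 * struct target_semid64_ds at target_addr.
 */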
3552 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3553                                                abi_ulong target_addr)
3554 {
3555     struct target_ipc_perm *target_ip;
3556     struct target_semid64_ds *target_sd;
3557 
3558     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3559         return -TARGET_EFAULT;
3560     target_ip = &(target_sd->sem_perm);
3561     host_ip->__key = tswap32(target_ip->__key);
3562     host_ip->uid = tswap32(target_ip->uid);
3563     host_ip->gid = tswap32(target_ip->gid);
3564     host_ip->cuid = tswap32(target_ip->cuid);
3565     host_ip->cgid = tswap32(target_ip->cgid);
3566 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3567     host_ip->mode = tswap32(target_ip->mode);
3568 #else
3569     host_ip->mode = tswap16(target_ip->mode);
3570 #endif
3571 #if defined(TARGET_PPC)
3572     host_ip->__seq = tswap32(target_ip->__seq);
3573 #else
3574     host_ip->__seq = tswap16(target_ip->__seq);
3575 #endif
3576     unlock_user_struct(target_sd, target_addr, 0);
3577     return 0;
3578 }
3579 
3580 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3581                                                struct ipc_perm *host_ip)
3582 {
3583     struct target_ipc_perm *target_ip;
3584     struct target_semid64_ds *target_sd;
3585 
3586     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3587         return -TARGET_EFAULT;
3588     target_ip = &(target_sd->sem_perm);
3589     target_ip->__key = tswap32(host_ip->__key);
3590     target_ip->uid = tswap32(host_ip->uid);
3591     target_ip->gid = tswap32(host_ip->gid);
3592     target_ip->cuid = tswap32(host_ip->cuid);
3593     target_ip->cgid = tswap32(host_ip->cgid);
3594 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3595     target_ip->mode = tswap32(host_ip->mode);
3596 #else
3597     target_ip->mode = tswap16(host_ip->mode);
3598 #endif
3599 #if defined(TARGET_PPC)
3600     target_ip->__seq = tswap32(host_ip->__seq);
3601 #else
3602     target_ip->__seq = tswap16(host_ip->__seq);
3603 #endif
3604     unlock_user_struct(target_sd, target_addr, 1);
3605     return 0;
3606 }
3607 
3608 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3609                                                abi_ulong target_addr)
3610 {
3611     struct target_semid64_ds *target_sd;
3612 
3613     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3614         return -TARGET_EFAULT;
3615     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3616         return -TARGET_EFAULT;
3617     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3618     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3619     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3620     unlock_user_struct(target_sd, target_addr, 0);
3621     return 0;
3622 }
3623 
3624 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3625                                                struct semid_ds *host_sd)
3626 {
3627     struct target_semid64_ds *target_sd;
3628 
3629     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3630         return -TARGET_EFAULT;
3631     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3632         return -TARGET_EFAULT;
3633     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3634     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3635     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3636     unlock_user_struct(target_sd, target_addr, 1);
3637     return 0;
3638 }
3639 
3640 struct target_seminfo {
3641     int semmap;
3642     int semmni;
3643     int semmns;
3644     int semmnu;
3645     int semmsl;
3646     int semopm;
3647     int semume;
3648     int semusz;
3649     int semvmx;
3650     int semaem;
3651 };
3652 
3653 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3654                                               struct seminfo *host_seminfo)
3655 {
3656     struct target_seminfo *target_seminfo;
3657     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3658         return -TARGET_EFAULT;
3659     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3660     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3661     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3662     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3663     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3664     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3665     __put_user(host_seminfo->semume, &target_seminfo->semume);
3666     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3667     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3668     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3669     unlock_user_struct(target_seminfo, target_addr, 1);
3670     return 0;
3671 }
3672 
3673 union semun {
3674 	int val;
3675 	struct semid_ds *buf;
3676 	unsigned short *array;
3677 	struct seminfo *__buf;
3678 };
3679 
3680 union target_semun {
3681 	int val;
3682 	abi_ulong buf;
3683 	abi_ulong array;
3684 	abi_ulong __buf;
3685 };
3686 
3687 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3688                                                abi_ulong target_addr)
3689 {
3690     int nsems;
3691     unsigned short *array;
3692     union semun semun;
3693     struct semid_ds semid_ds;
3694     int i, ret;
3695 
3696     semun.buf = &semid_ds;
3697 
3698     ret = semctl(semid, 0, IPC_STAT, semun);
3699     if (ret == -1)
3700         return get_errno(ret);
3701 
3702     nsems = semid_ds.sem_nsems;
3703 
3704     *host_array = g_try_new(unsigned short, nsems);
3705     if (!*host_array) {
3706         return -TARGET_ENOMEM;
3707     }
3708     array = lock_user(VERIFY_READ, target_addr,
3709                       nsems*sizeof(unsigned short), 1);
3710     if (!array) {
3711         g_free(*host_array);
3712         return -TARGET_EFAULT;
3713     }
3714 
3715     for(i=0; i<nsems; i++) {
3716         __get_user((*host_array)[i], &array[i]);
3717     }
3718     unlock_user(array, target_addr, 0);
3719 
3720     return 0;
3721 }
3722 
3723 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3724                                                unsigned short **host_array)
3725 {
3726     int nsems;
3727     unsigned short *array;
3728     union semun semun;
3729     struct semid_ds semid_ds;
3730     int i, ret;
3731 
3732     semun.buf = &semid_ds;
3733 
3734     ret = semctl(semid, 0, IPC_STAT, semun);
3735     if (ret == -1)
3736         return get_errno(ret);
3737 
3738     nsems = semid_ds.sem_nsems;
3739 
3740     array = lock_user(VERIFY_WRITE, target_addr,
3741                       nsems*sizeof(unsigned short), 0);
3742     if (!array)
3743         return -TARGET_EFAULT;
3744 
3745     for(i=0; i<nsems; i++) {
3746         __put_user((*host_array)[i], &array[i]);
3747     }
3748     g_free(*host_array);
3749     unlock_user(array, target_addr, 1);
3750 
3751     return 0;
3752 }
3753 
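/* do_semctl() must return target values and target errnos. */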
3754 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3755                                  abi_ulong target_arg)
3756 {
3757     union target_semun target_su = { .buf = target_arg };
3758     union semun arg;
3759     struct semid_ds dsarg;
3760     unsigned short *array = NULL;
3761     struct seminfo seminfo;
3762     abi_long ret = -TARGET_EINVAL;
3763     abi_long err;
3764     cmd &= 0xff;
3765 
3766     switch( cmd ) {
3767 	case GETVAL:
3768 	case SETVAL:
3769             /* In 64 bit cross-endian situations, we will erroneously pick up
3770              * the wrong half of the union for the "val" element.  To rectify
3771              * this, the entire 8-byte structure is byteswapped, followed by
3772              * a swap of the 4 byte val field. In other cases, the data is
3773              * already in proper host byte order. */
3774 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3775 		target_su.buf = tswapal(target_su.buf);
3776 		arg.val = tswap32(target_su.val);
3777 	    } else {
3778 		arg.val = target_su.val;
3779 	    }
3780             ret = get_errno(semctl(semid, semnum, cmd, arg));
3781             break;
3782 	case GETALL:
3783 	case SETALL:
3784             err = target_to_host_semarray(semid, &array, target_su.array);
3785             if (err)
3786                 return err;
3787             arg.array = array;
3788             ret = get_errno(semctl(semid, semnum, cmd, arg));
3789             err = host_to_target_semarray(semid, target_su.array, &array);
3790             if (err)
3791                 return err;
3792             break;
3793 	case IPC_STAT:
3794 	case IPC_SET:
3795 	case SEM_STAT:
3796             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3797             if (err)
3798                 return err;
3799             arg.buf = &dsarg;
3800             ret = get_errno(semctl(semid, semnum, cmd, arg));
3801             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3802             if (err)
3803                 return err;
3804             break;
3805 	case IPC_INFO:
3806 	case SEM_INFO:
3807             arg.__buf = &seminfo;
3808             ret = get_errno(semctl(semid, semnum, cmd, arg));
3809             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3810             if (err)
3811                 return err;
3812             break;
3813 	case IPC_RMID:
3814 	case GETPID:
3815 	case GETNCNT:
3816 	case GETZCNT:
3817             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3818             break;
3819     }
3820 
3821     return ret;
3822 }
3823 
3824 struct target_sembuf {
3825     unsigned short sem_num;
3826     short sem_op;
3827     short sem_flg;
3828 };
3829 
3830 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3831                                              abi_ulong target_addr,
3832                                              unsigned nsops)
3833 {
3834     struct target_sembuf *target_sembuf;
3835     int i;
3836 
3837     target_sembuf = lock_user(VERIFY_READ, target_addr,
3838                               nsops*sizeof(struct target_sembuf), 1);
3839     if (!target_sembuf)
3840         return -TARGET_EFAULT;
3841 
3842     for(i=0; i<nsops; i++) {
3843         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3844         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3845         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3846     }
3847 
3848     unlock_user(target_sembuf, target_addr, 0);
3849 
3850     return 0;
3851 }
3852 
3853 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
3854     defined(TARGET_NR_semtimedop)
3855 
3856 /*
3857  * This macro is required to handle the s390 variant, which passes the
3858  * arguments in a different order than the default.
3859  */
3860 #ifdef __s390x__
3861 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3862   (__nsops), (__timeout), (__sops)
3863 #else
3864 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
3865   (__nsops), 0, (__sops), (__timeout)
3866 #endif
3867 
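/* do_semtimedop() must return target values and target errnos. */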
3868 static inline abi_long do_semtimedop(int semid,
3869                                      abi_long ptr,
3870                                      unsigned nsops,
3871                                      abi_long timeout)
3872 {
3873     struct sembuf *sops;
3874     struct timespec ts, *pts = NULL;
3875     abi_long ret;
3876 
3877     if (timeout) {
3878         pts = &ts;
3879         if (target_to_host_timespec(pts, timeout)) {
3880             return -TARGET_EFAULT;
3881         }
3882     }
3883 
3884     if (nsops > TARGET_SEMOPM) {
3885         return -TARGET_E2BIG;
3886     }
3887 
3888     sops = g_new(struct sembuf, nsops);
3889 
3890     if (target_to_host_sembuf(sops, ptr, nsops)) {
3891         g_free(sops);
3892         return -TARGET_EFAULT;
3893     }
3894 
3895     ret = -TARGET_ENOSYS;
3896 #ifdef __NR_semtimedop
3897     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
3898 #endif
3899 #ifdef __NR_ipc
3900     if (ret == -TARGET_ENOSYS) {
3901         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
3902                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
3903     }
3904 #endif
3905     g_free(sops);
3906     return ret;
3907 }
3908 #endif
3909 
3910 struct target_msqid_ds
3911 {
3912     struct target_ipc_perm msg_perm;
3913     abi_ulong msg_stime;
3914 #if TARGET_ABI_BITS == 32
3915     abi_ulong __unused1;
3916 #endif
3917     abi_ulong msg_rtime;
3918 #if TARGET_ABI_BITS == 32
3919     abi_ulong __unused2;
3920 #endif
3921     abi_ulong msg_ctime;
3922 #if TARGET_ABI_BITS == 32
3923     abi_ulong __unused3;
3924 #endif
3925     abi_ulong __msg_cbytes;
3926     abi_ulong msg_qnum;
3927     abi_ulong msg_qbytes;
3928     abi_ulong msg_lspid;
3929     abi_ulong msg_lrpid;
3930     abi_ulong __unused4;
3931     abi_ulong __unused5;
3932 };
3933 
3934 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3935                                                abi_ulong target_addr)
3936 {
3937     struct target_msqid_ds *target_md;
3938 
3939     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3940         return -TARGET_EFAULT;
3941     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3942         return -TARGET_EFAULT;
3943     host_md->msg_stime = tswapal(target_md->msg_stime);
3944     host_md->msg_rtime = tswapal(target_md->msg_rtime);
3945     host_md->msg_ctime = tswapal(target_md->msg_ctime);
3946     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3947     host_md->msg_qnum = tswapal(target_md->msg_qnum);
3948     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3949     host_md->msg_lspid = tswapal(target_md->msg_lspid);
3950     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3951     unlock_user_struct(target_md, target_addr, 0);
3952     return 0;
3953 }
3954 
3955 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3956                                                struct msqid_ds *host_md)
3957 {
3958     struct target_msqid_ds *target_md;
3959 
3960     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3961         return -TARGET_EFAULT;
3962     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3963         return -TARGET_EFAULT;
3964     target_md->msg_stime = tswapal(host_md->msg_stime);
3965     target_md->msg_rtime = tswapal(host_md->msg_rtime);
3966     target_md->msg_ctime = tswapal(host_md->msg_ctime);
3967     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3968     target_md->msg_qnum = tswapal(host_md->msg_qnum);
3969     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3970     target_md->msg_lspid = tswapal(host_md->msg_lspid);
3971     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3972     unlock_user_struct(target_md, target_addr, 1);
3973     return 0;
3974 }
3975 
3976 struct target_msginfo {
3977     int msgpool;
3978     int msgmap;
3979     int msgmax;
3980     int msgmnb;
3981     int msgmni;
3982     int msgssz;
3983     int msgtql;
3984     unsigned short int msgseg;
3985 };
3986 
3987 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3988                                               struct msginfo *host_msginfo)
3989 {
3990     struct target_msginfo *target_msginfo;
3991     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3992         return -TARGET_EFAULT;
3993     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3994     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3995     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3996     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3997     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3998     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3999     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4000     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4001     unlock_user_struct(target_msginfo, target_addr, 1);
4002     return 0;
4003 }
4004 
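/* do_msgctl() must return target values and target errnos. */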
4005 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4006 {
4007     struct msqid_ds dsarg;
4008     struct msginfo msginfo;
4009     abi_long ret = -TARGET_EINVAL;
4010 
4011     cmd &= 0xff;
4012 
4013     switch (cmd) {
4014     case IPC_STAT:
4015     case IPC_SET:
4016     case MSG_STAT:
4017         if (target_to_host_msqid_ds(&dsarg,ptr))
4018             return -TARGET_EFAULT;
4019         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4020         if (host_to_target_msqid_ds(ptr,&dsarg))
4021             return -TARGET_EFAULT;
4022         break;
4023     case IPC_RMID:
4024         ret = get_errno(msgctl(msgid, cmd, NULL));
4025         break;
4026     case IPC_INFO:
4027     case MSG_INFO:
4028         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4029         if (host_to_target_msginfo(ptr, &msginfo))
4030             return -TARGET_EFAULT;
4031         break;
4032     }
4033 
4034     return ret;
4035 }
4036 
4037 struct target_msgbuf {
4038     abi_long mtype;
4039     char	mtext[1];
4040 };
4041 
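/* do_msgsnd() must return target values and target errnos. */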
4042 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4043                                  ssize_t msgsz, int msgflg)
4044 {
4045     struct target_msgbuf *target_mb;
4046     struct msgbuf *host_mb;
4047     abi_long ret = 0;
4048 
4049     if (msgsz < 0) {
4050         return -TARGET_EINVAL;
4051     }
4052 
4053     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4054         return -TARGET_EFAULT;
4055     host_mb = g_try_malloc(msgsz + sizeof(long));
4056     if (!host_mb) {
4057         unlock_user_struct(target_mb, msgp, 0);
4058         return -TARGET_ENOMEM;
4059     }
4060     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4061     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4062     ret = -TARGET_ENOSYS;
4063 #ifdef __NR_msgsnd
4064     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4065 #endif
4066 #ifdef __NR_ipc
4067     if (ret == -TARGET_ENOSYS) {
4068 #ifdef __s390x__
4069         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4070                                  host_mb));
4071 #else
4072         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4073                                  host_mb, 0));
4074 #endif
4075     }
4076 #endif
4077     g_free(host_mb);
4078     unlock_user_struct(target_mb, msgp, 0);
4079 
4080     return ret;
4081 }
4082 
4083 #ifdef __NR_ipc
4084 #if defined(__sparc__)
4085 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4086 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4087 #elif defined(__s390x__)
4088 /* The s390 sys_ipc variant has only five parameters.  */
4089 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4090     ((long int[]){(long int)__msgp, __msgtyp})
4091 #else
4092 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4093     ((long int[]){(long int)__msgp, __msgtyp}), 0
4094 #endif
4095 #endif
4096 
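/* do_msgrcv() must return target values and target errnos. */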
4097 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4098                                  ssize_t msgsz, abi_long msgtyp,
4099                                  int msgflg)
4100 {
4101     struct target_msgbuf *target_mb;
4102     char *target_mtext;
4103     struct msgbuf *host_mb;
4104     abi_long ret = 0;
4105 
4106     if (msgsz < 0) {
4107         return -TARGET_EINVAL;
4108     }
4109 
4110     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4111         return -TARGET_EFAULT;
4112 
4113     host_mb = g_try_malloc(msgsz + sizeof(long));
4114     if (!host_mb) {
4115         ret = -TARGET_ENOMEM;
4116         goto end;
4117     }
4118     ret = -TARGET_ENOSYS;
4119 #ifdef __NR_msgrcv
4120     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4121 #endif
4122 #ifdef __NR_ipc
4123     if (ret == -TARGET_ENOSYS) {
4124         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4125                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4126     }
4127 #endif
4128 
4129     if (ret > 0) {
4130         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4131         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4132         if (!target_mtext) {
4133             ret = -TARGET_EFAULT;
4134             goto end;
4135         }
4136         memcpy(target_mb->mtext, host_mb->mtext, ret);
4137         unlock_user(target_mtext, target_mtext_addr, ret);
4138     }
4139 
4140     target_mb->mtype = tswapal(host_mb->mtype);
4141 
4142 end:
4143     if (target_mb)
4144         unlock_user_struct(target_mb, msgp, 1);
4145     g_free(host_mb);
4146     return ret;
4147 }
4148 
4149 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4150                                                abi_ulong target_addr)
4151 {
4152     struct target_shmid_ds *target_sd;
4153 
4154     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4155         return -TARGET_EFAULT;
4156     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4157         return -TARGET_EFAULT;
4158     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4159     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4160     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4161     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4162     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4163     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4164     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4165     unlock_user_struct(target_sd, target_addr, 0);
4166     return 0;
4167 }
4168 
4169 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4170                                                struct shmid_ds *host_sd)
4171 {
4172     struct target_shmid_ds *target_sd;
4173 
4174     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4175         return -TARGET_EFAULT;
4176     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4177         return -TARGET_EFAULT;
4178     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4179     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4180     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4181     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4182     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4183     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4184     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4185     unlock_user_struct(target_sd, target_addr, 1);
4186     return 0;
4187 }
4188 
4189 struct  target_shminfo {
4190     abi_ulong shmmax;
4191     abi_ulong shmmin;
4192     abi_ulong shmmni;
4193     abi_ulong shmseg;
4194     abi_ulong shmall;
4195 };
4196 
4197 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4198                                               struct shminfo *host_shminfo)
4199 {
4200     struct target_shminfo *target_shminfo;
4201     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4202         return -TARGET_EFAULT;
4203     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4204     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4205     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4206     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4207     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4208     unlock_user_struct(target_shminfo, target_addr, 1);
4209     return 0;
4210 }
4211 
4212 struct target_shm_info {
4213     int used_ids;
4214     abi_ulong shm_tot;
4215     abi_ulong shm_rss;
4216     abi_ulong shm_swp;
4217     abi_ulong swap_attempts;
4218     abi_ulong swap_successes;
4219 };
4220 
4221 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4222                                                struct shm_info *host_shm_info)
4223 {
4224     struct target_shm_info *target_shm_info;
4225     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4226         return -TARGET_EFAULT;
4227     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4228     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4229     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4230     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4231     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4232     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4233     unlock_user_struct(target_shm_info, target_addr, 1);
4234     return 0;
4235 }
4236 
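/* do_shmctl() must return target values and target errnos. */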
4237 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4238 {
4239     struct shmid_ds dsarg;
4240     struct shminfo shminfo;
4241     struct shm_info shm_info;
4242     abi_long ret = -TARGET_EINVAL;
4243 
4244     cmd &= 0xff;
4245 
4246     switch(cmd) {
4247     case IPC_STAT:
4248     case IPC_SET:
4249     case SHM_STAT:
4250         if (target_to_host_shmid_ds(&dsarg, buf))
4251             return -TARGET_EFAULT;
4252         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4253         if (host_to_target_shmid_ds(buf, &dsarg))
4254             return -TARGET_EFAULT;
4255         break;
4256     case IPC_INFO:
4257         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4258         if (host_to_target_shminfo(buf, &shminfo))
4259             return -TARGET_EFAULT;
4260         break;
4261     case SHM_INFO:
4262         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4263         if (host_to_target_shm_info(buf, &shm_info))
4264             return -TARGET_EFAULT;
4265         break;
4266     case IPC_RMID:
4267     case SHM_LOCK:
4268     case SHM_UNLOCK:
4269         ret = get_errno(shmctl(shmid, cmd, NULL));
4270         break;
4271     }
4272 
4273     return ret;
4274 }
4275 
4276 #ifndef TARGET_FORCE_SHMLBA
4277 /* For most architectures, SHMLBA is the same as the page size;
4278  * some architectures have larger values, in which case they should
4279  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4280  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4281  * and defining its own value for SHMLBA.
4282  *
4283  * The kernel also permits SHMLBA to be set by the architecture to a
4284  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4285  * this means that addresses are rounded to the large size if
4286  * SHM_RND is set but addresses not aligned to that size are not rejected
4287  * as long as they are at least page-aligned. Since the only architecture
4288  * which uses this is ia64, this code doesn't provide for that oddity.
4289  */
4290 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4291 {
4292     return TARGET_PAGE_SIZE;
4293 }
4294 #endif
4295 
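/* do_shmat() must return target values and target errnos. Successful
 * attaches are recorded in shm_regions[] so that do_shmdt() can later clear
 * the guest page flags for the mapping.
 */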
4296 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4297                                  int shmid, abi_ulong shmaddr, int shmflg)
4298 {
4299     abi_long raddr;
4300     void *host_raddr;
4301     struct shmid_ds shm_info;
4302     int i,ret;
4303     abi_ulong shmlba;
4304 
4305     /* find out the length of the shared memory segment */
4306     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4307     if (is_error(ret)) {
4308         /* can't get length, bail out */
4309         return ret;
4310     }
4311 
4312     shmlba = target_shmlba(cpu_env);
4313 
4314     if (shmaddr & (shmlba - 1)) {
4315         if (shmflg & SHM_RND) {
4316             shmaddr &= ~(shmlba - 1);
4317         } else {
4318             return -TARGET_EINVAL;
4319         }
4320     }
4321     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4322         return -TARGET_EINVAL;
4323     }
4324 
4325     mmap_lock();
4326 
4327     if (shmaddr)
4328         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4329     else {
4330         abi_ulong mmap_start;
4331 
4332         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4333         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4334 
4335         if (mmap_start == -1) {
4336             errno = ENOMEM;
4337             host_raddr = (void *)-1;
4338         } else
4339             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4340     }
4341 
4342     if (host_raddr == (void *)-1) {
4343         mmap_unlock();
4344         return get_errno((long)host_raddr);
4345     }
4346     raddr=h2g((unsigned long)host_raddr);
4347 
4348     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4349                    PAGE_VALID | PAGE_READ |
4350                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4351 
4352     for (i = 0; i < N_SHM_REGIONS; i++) {
4353         if (!shm_regions[i].in_use) {
4354             shm_regions[i].in_use = true;
4355             shm_regions[i].start = raddr;
4356             shm_regions[i].size = shm_info.shm_segsz;
4357             break;
4358         }
4359     }
4360 
4361     mmap_unlock();
4362     return raddr;
4363 
4364 }
4365 
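/* do_shmdt() must return target values and target errnos. */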
4366 static inline abi_long do_shmdt(abi_ulong shmaddr)
4367 {
4368     int i;
4369     abi_long rv;
4370 
4371     mmap_lock();
4372 
4373     for (i = 0; i < N_SHM_REGIONS; ++i) {
4374         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4375             shm_regions[i].in_use = false;
4376             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4377             break;
4378         }
4379     }
4380     rv = get_errno(shmdt(g2h(shmaddr)));
4381 
4382     mmap_unlock();
4383 
4384     return rv;
4385 }
4386 
4387 #ifdef TARGET_NR_ipc
4388 /* ??? This only works with linear mappings.  */
4389 /* do_ipc() must return target values and target errnos. */
4390 static abi_long do_ipc(CPUArchState *cpu_env,
4391                        unsigned int call, abi_long first,
4392                        abi_long second, abi_long third,
4393                        abi_long ptr, abi_long fifth)
4394 {
4395     int version;
4396     abi_long ret = 0;
4397 
4398     version = call >> 16;
4399     call &= 0xffff;
4400 
4401     switch (call) {
4402     case IPCOP_semop:
4403         ret = do_semtimedop(first, ptr, second, 0);
4404         break;
4405     case IPCOP_semtimedop:
4406     /*
4407      * The s390 sys_ipc variant has only five parameters instead of six
4408      * (as for default variant) and the only difference is the handling of
4409      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4410      * to a struct timespec where the generic variant uses fifth parameter.
4411      */
4412 #if defined(TARGET_S390X)
4413         ret = do_semtimedop(first, ptr, second, third);
4414 #else
4415         ret = do_semtimedop(first, ptr, second, fifth);
4416 #endif
4417         break;
4418 
4419     case IPCOP_semget:
4420         ret = get_errno(semget(first, second, third));
4421         break;
4422 
4423     case IPCOP_semctl: {
4424         /* The semun argument to semctl is passed by value, so dereference the
4425          * ptr argument. */
4426         abi_ulong atptr;
4427         get_user_ual(atptr, ptr);
4428         ret = do_semctl(first, second, third, atptr);
4429         break;
4430     }
4431 
4432     case IPCOP_msgget:
4433         ret = get_errno(msgget(first, second));
4434         break;
4435 
4436     case IPCOP_msgsnd:
4437         ret = do_msgsnd(first, ptr, second, third);
4438         break;
4439 
4440     case IPCOP_msgctl:
4441         ret = do_msgctl(first, second, ptr);
4442         break;
4443 
4444     case IPCOP_msgrcv:
4445         switch (version) {
4446         case 0:
4447             {
4448                 struct target_ipc_kludge {
4449                     abi_long msgp;
4450                     abi_long msgtyp;
4451                 } *tmp;
4452 
4453                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4454                     ret = -TARGET_EFAULT;
4455                     break;
4456                 }
4457 
4458                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4459 
4460                 unlock_user_struct(tmp, ptr, 0);
4461                 break;
4462             }
4463         default:
4464             ret = do_msgrcv(first, ptr, second, fifth, third);
4465         }
4466         break;
4467 
4468     case IPCOP_shmat:
4469         switch (version) {
4470         default:
4471         {
4472             abi_ulong raddr;
4473             raddr = do_shmat(cpu_env, first, ptr, second);
4474             if (is_error(raddr))
4475                 return get_errno(raddr);
4476             if (put_user_ual(raddr, third))
4477                 return -TARGET_EFAULT;
4478             break;
4479         }
4480         case 1:
4481             ret = -TARGET_EINVAL;
4482             break;
4483         }
4484 	break;
4485     case IPCOP_shmdt:
4486         ret = do_shmdt(ptr);
4487 	break;
4488 
4489     case IPCOP_shmget:
4490 	/* IPC_* flag values are the same on all linux platforms */
4491 	ret = get_errno(shmget(first, second, third));
4492 	break;
4493 
4494 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4495     case IPCOP_shmctl:
4496         ret = do_shmctl(first, second, ptr);
4497         break;
4498     default:
4499         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4500                       call, version);
4501 	ret = -TARGET_ENOSYS;
4502 	break;
4503     }
4504     return ret;
4505 }
4506 #endif
4507 
4508 /* kernel structure types definitions */
4509 
4510 #define STRUCT(name, ...) STRUCT_ ## name,
4511 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4512 enum {
4513 #include "syscall_types.h"
4514 STRUCT_MAX
4515 };
4516 #undef STRUCT
4517 #undef STRUCT_SPECIAL
4518 
4519 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4520 #define STRUCT_SPECIAL(name)
4521 #include "syscall_types.h"
4522 #undef STRUCT
4523 #undef STRUCT_SPECIAL
4524 
4525 #define MAX_STRUCT_SIZE 4096
4526 
4527 #ifdef CONFIG_FIEMAP
4528 /* So fiemap access checks don't overflow on 32 bit systems.
4529  * This is very slightly smaller than the limit imposed by
4530  * the underlying kernel.
4531  */
4532 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4533                             / sizeof(struct fiemap_extent))
4534 
4535 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4536                                        int fd, int cmd, abi_long arg)
4537 {
4538     /* The parameter for this ioctl is a struct fiemap followed
4539      * by an array of struct fiemap_extent whose size is set
4540      * in fiemap->fm_extent_count. The array is filled in by the
4541      * ioctl.
4542      */
4543     int target_size_in, target_size_out;
4544     struct fiemap *fm;
4545     const argtype *arg_type = ie->arg_type;
4546     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4547     void *argptr, *p;
4548     abi_long ret;
4549     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4550     uint32_t outbufsz;
4551     int free_fm = 0;
4552 
4553     assert(arg_type[0] == TYPE_PTR);
4554     assert(ie->access == IOC_RW);
4555     arg_type++;
4556     target_size_in = thunk_type_size(arg_type, 0);
4557     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4558     if (!argptr) {
4559         return -TARGET_EFAULT;
4560     }
4561     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4562     unlock_user(argptr, arg, 0);
4563     fm = (struct fiemap *)buf_temp;
4564     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4565         return -TARGET_EINVAL;
4566     }
4567 
4568     outbufsz = sizeof (*fm) +
4569         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4570 
4571     if (outbufsz > MAX_STRUCT_SIZE) {
4572         /* We can't fit all the extents into the fixed size buffer.
4573          * Allocate one that is large enough and use it instead.
4574          */
4575         fm = g_try_malloc(outbufsz);
4576         if (!fm) {
4577             return -TARGET_ENOMEM;
4578         }
4579         memcpy(fm, buf_temp, sizeof(struct fiemap));
4580         free_fm = 1;
4581     }
4582     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4583     if (!is_error(ret)) {
4584         target_size_out = target_size_in;
4585         /* An extent_count of 0 means we were only counting the extents
4586          * so there are no structs to copy
4587          */
4588         if (fm->fm_extent_count != 0) {
4589             target_size_out += fm->fm_mapped_extents * extent_size;
4590         }
4591         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4592         if (!argptr) {
4593             ret = -TARGET_EFAULT;
4594         } else {
4595             /* Convert the struct fiemap */
4596             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4597             if (fm->fm_extent_count != 0) {
4598                 p = argptr + target_size_in;
4599                 /* ...and then all the struct fiemap_extents */
4600                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4601                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4602                                   THUNK_TARGET);
4603                     p += extent_size;
4604                 }
4605             }
4606             unlock_user(argptr, arg, target_size_out);
4607         }
4608     }
4609     if (free_fm) {
4610         g_free(fm);
4611     }
4612     return ret;
4613 }
4614 #endif
4615 
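/* Custom ioctl handler for struct ifconf: converts the ifconf header and the
 * ifreq array it points to between target and host layouts.
 */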
4616 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4617                                 int fd, int cmd, abi_long arg)
4618 {
4619     const argtype *arg_type = ie->arg_type;
4620     int target_size;
4621     void *argptr;
4622     int ret;
4623     struct ifconf *host_ifconf;
4624     uint32_t outbufsz;
4625     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4626     int target_ifreq_size;
4627     int nb_ifreq;
4628     int free_buf = 0;
4629     int i;
4630     int target_ifc_len;
4631     abi_long target_ifc_buf;
4632     int host_ifc_len;
4633     char *host_ifc_buf;
4634 
4635     assert(arg_type[0] == TYPE_PTR);
4636     assert(ie->access == IOC_RW);
4637 
4638     arg_type++;
4639     target_size = thunk_type_size(arg_type, 0);
4640 
4641     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4642     if (!argptr)
4643         return -TARGET_EFAULT;
4644     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4645     unlock_user(argptr, arg, 0);
4646 
4647     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4648     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4649     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4650 
4651     if (target_ifc_buf != 0) {
4652         target_ifc_len = host_ifconf->ifc_len;
4653         nb_ifreq = target_ifc_len / target_ifreq_size;
4654         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4655 
4656         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4657         if (outbufsz > MAX_STRUCT_SIZE) {
4658             /*
4659              * We can't fit all the ifreq entries into the fixed size buffer.
4660              * Allocate one that is large enough and use it instead.
4661              */
4662             host_ifconf = malloc(outbufsz);
4663             if (!host_ifconf) {
4664                 return -TARGET_ENOMEM;
4665             }
4666             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4667             free_buf = 1;
4668         }
4669         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4670 
4671         host_ifconf->ifc_len = host_ifc_len;
4672     } else {
4673         host_ifc_buf = NULL;
4674     }
4675     host_ifconf->ifc_buf = host_ifc_buf;
4676 
4677     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4678     if (!is_error(ret)) {
4679         /* convert host ifc_len to target ifc_len */
4680 
4681         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4682         target_ifc_len = nb_ifreq * target_ifreq_size;
4683         host_ifconf->ifc_len = target_ifc_len;
4684 
4685         /* restore target ifc_buf */
4686 
4687         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4688 
4689         /* copy struct ifconf to target user */
4690 
4691         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4692         if (!argptr)
4693             return -TARGET_EFAULT;
4694         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4695         unlock_user(argptr, arg, target_size);
4696 
4697         if (target_ifc_buf != 0) {
4698             /* copy ifreq[] to target user */
4699             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
                 if (!argptr) {
                     if (free_buf) {
                         free(host_ifconf);
                     }
                     return -TARGET_EFAULT;
                 }
4700             for (i = 0; i < nb_ifreq; i++) {
4701                 thunk_convert(argptr + i * target_ifreq_size,
4702                               host_ifc_buf + i * sizeof(struct ifreq),
4703                               ifreq_arg_type, THUNK_TARGET);
4704             }
4705             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4706         }
4707     }
4708 
4709     if (free_buf) {
4710         free(host_ifconf);
4711     }
4712 
4713     return ret;
4714 }
4715 
4716 #if defined(CONFIG_USBFS)
4717 #if HOST_LONG_BITS > 64
4718 #error USBDEVFS thunks do not support >64 bit hosts yet.
4719 #endif
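/*
 * USB URB handling: for every submitted URB we allocate a live_urb that
 * carries the host usbdevfs_urb together with the guest addresses of the
 * urb struct and of its data buffer.  The address of the embedded
 * host_urb is what the kernel hands back from USBDEVFS_REAPURB, so the
 * offsetof() arithmetic below recovers the metadata, while a hash table
 * keyed by the guest urb address lets USBDEVFS_DISCARDURB find the
 * matching host URB.
 */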
4720 struct live_urb {
4721     uint64_t target_urb_adr;
4722     uint64_t target_buf_adr;
4723     char *target_buf_ptr;
4724     struct usbdevfs_urb host_urb;
4725 };
4726 
4727 static GHashTable *usbdevfs_urb_hashtable(void)
4728 {
4729     static GHashTable *urb_hashtable;
4730 
4731     if (!urb_hashtable) {
4732         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4733     }
4734     return urb_hashtable;
4735 }
4736 
4737 static void urb_hashtable_insert(struct live_urb *urb)
4738 {
4739     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4740     g_hash_table_insert(urb_hashtable, urb, urb);
4741 }
4742 
4743 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4744 {
4745     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4746     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4747 }
4748 
4749 static void urb_hashtable_remove(struct live_urb *urb)
4750 {
4751     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4752     g_hash_table_remove(urb_hashtable, urb);
4753 }
4754 
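/*
 * USBDEVFS_REAPURB returns a pointer to the host URB; step back to the
 * containing live_urb, release (and copy back) the guest data buffer,
 * write the updated urb contents back to the guest urb struct, and
 * finally store the guest urb address into the guest's result pointer.
 */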
4755 static abi_long
4756 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4757                           int fd, int cmd, abi_long arg)
4758 {
4759     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4760     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4761     struct live_urb *lurb;
4762     void *argptr;
4763     uint64_t hurb;
4764     int target_size;
4765     uintptr_t target_urb_adr;
4766     abi_long ret;
4767 
4768     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4769 
4770     memset(buf_temp, 0, sizeof(uint64_t));
4771     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4772     if (is_error(ret)) {
4773         return ret;
4774     }
4775 
4776     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4777     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4778     if (!lurb->target_urb_adr) {
4779         return -TARGET_EFAULT;
4780     }
4781     urb_hashtable_remove(lurb);
4782     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4783         lurb->host_urb.buffer_length);
4784     lurb->target_buf_ptr = NULL;
4785 
4786     /* restore the guest buffer pointer */
4787     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4788 
4789     /* update the guest urb struct */
4790     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4791     if (!argptr) {
4792         g_free(lurb);
4793         return -TARGET_EFAULT;
4794     }
4795     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4796     unlock_user(argptr, lurb->target_urb_adr, target_size);
4797 
4798     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4799     /* write back the urb handle */
4800     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4801     if (!argptr) {
4802         g_free(lurb);
4803         return -TARGET_EFAULT;
4804     }
4805 
4806     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4807     target_urb_adr = lurb->target_urb_adr;
4808     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4809     unlock_user(argptr, arg, target_size);
4810 
4811     g_free(lurb);
4812     return ret;
4813 }
4814 
4815 static abi_long
4816 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4817                              uint8_t *buf_temp __attribute__((unused)),
4818                              int fd, int cmd, abi_long arg)
4819 {
4820     struct live_urb *lurb;
4821 
4822     /* map target address back to host URB with metadata. */
4823     lurb = urb_hashtable_lookup(arg);
4824     if (!lurb) {
4825         return -TARGET_EFAULT;
4826     }
4827     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4828 }
4829 
4830 static abi_long
4831 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4832                             int fd, int cmd, abi_long arg)
4833 {
4834     const argtype *arg_type = ie->arg_type;
4835     int target_size;
4836     abi_long ret;
4837     void *argptr;
4838     int rw_dir;
4839     struct live_urb *lurb;
4840 
4841     /*
4842      * each submitted URB needs to map to a unique ID for the
4843      * kernel, and that unique ID needs to be a pointer to
4844      * host memory.  hence, we need to malloc for each URB.
4845      * isochronous transfers have a variable length struct.
4846      */
4847     arg_type++;
4848     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4849 
4850     /* construct host copy of urb and metadata */
4851     lurb = g_try_malloc0(sizeof(struct live_urb));
4852     if (!lurb) {
4853         return -TARGET_ENOMEM;
4854     }
4855 
4856     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4857     if (!argptr) {
4858         g_free(lurb);
4859         return -TARGET_EFAULT;
4860     }
4861     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4862     unlock_user(argptr, arg, 0);
4863 
4864     lurb->target_urb_adr = arg;
4865     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4866 
4867     /* buffer space used depends on endpoint type so lock the entire buffer */
4868     /* control type urbs should check the buffer contents for true direction */
4869     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4870     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4871         lurb->host_urb.buffer_length, 1);
4872     if (lurb->target_buf_ptr == NULL) {
4873         g_free(lurb);
4874         return -TARGET_EFAULT;
4875     }
4876 
4877     /* update buffer pointer in host copy */
4878     lurb->host_urb.buffer = lurb->target_buf_ptr;
4879 
4880     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4881     if (is_error(ret)) {
4882         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4883         g_free(lurb);
4884     } else {
4885         urb_hashtable_insert(lurb);
4886     }
4887 
4888     return ret;
4889 }
4890 #endif /* CONFIG_USBFS */
4891 
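/*
 * Device-mapper ioctls all share a fixed struct dm_ioctl header followed
 * by a command-specific payload: data_start gives the payload offset and
 * data_size the overall buffer size.  Since that can exceed buf_temp, the
 * header is converted first and then copied into a buffer sized from
 * data_size; the payload is translated per command on the way in, and the
 * kernel's reply is translated per command on the way out.
 */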
4892 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4893                             int cmd, abi_long arg)
4894 {
4895     void *argptr;
4896     struct dm_ioctl *host_dm;
4897     abi_long guest_data;
4898     uint32_t guest_data_size;
4899     int target_size;
4900     const argtype *arg_type = ie->arg_type;
4901     abi_long ret;
4902     void *big_buf = NULL;
4903     char *host_data;
4904 
4905     arg_type++;
4906     target_size = thunk_type_size(arg_type, 0);
4907     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4908     if (!argptr) {
4909         ret = -TARGET_EFAULT;
4910         goto out;
4911     }
4912     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4913     unlock_user(argptr, arg, 0);
4914 
4915     /* buf_temp is too small, so fetch things into a bigger buffer */
4916     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4917     memcpy(big_buf, buf_temp, target_size);
4918     buf_temp = big_buf;
4919     host_dm = big_buf;
4920 
4921     guest_data = arg + host_dm->data_start;
4922     if ((guest_data - arg) < 0) {
4923         ret = -TARGET_EINVAL;
4924         goto out;
4925     }
4926     guest_data_size = host_dm->data_size - host_dm->data_start;
4927     host_data = (char*)host_dm + host_dm->data_start;
4928 
4929     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4930     if (!argptr) {
4931         ret = -TARGET_EFAULT;
4932         goto out;
4933     }
4934 
4935     switch (ie->host_cmd) {
4936     case DM_REMOVE_ALL:
4937     case DM_LIST_DEVICES:
4938     case DM_DEV_CREATE:
4939     case DM_DEV_REMOVE:
4940     case DM_DEV_SUSPEND:
4941     case DM_DEV_STATUS:
4942     case DM_DEV_WAIT:
4943     case DM_TABLE_STATUS:
4944     case DM_TABLE_CLEAR:
4945     case DM_TABLE_DEPS:
4946     case DM_LIST_VERSIONS:
4947         /* no input data */
4948         break;
4949     case DM_DEV_RENAME:
4950     case DM_DEV_SET_GEOMETRY:
4951         /* data contains only strings */
4952         memcpy(host_data, argptr, guest_data_size);
4953         break;
4954     case DM_TARGET_MSG:
4955         memcpy(host_data, argptr, guest_data_size);
4956         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4957         break;
4958     case DM_TABLE_LOAD:
4959     {
4960         void *gspec = argptr;
4961         void *cur_data = host_data;
4962         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4963         int spec_size = thunk_type_size(arg_type, 0);
4964         int i;
4965 
4966         for (i = 0; i < host_dm->target_count; i++) {
4967             struct dm_target_spec *spec = cur_data;
4968             uint32_t next;
4969             int slen;
4970 
4971             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4972             slen = strlen((char*)gspec + spec_size) + 1;
4973             next = spec->next;
4974             spec->next = sizeof(*spec) + slen;
4975             strcpy((char*)&spec[1], gspec + spec_size);
4976             gspec += next;
4977             cur_data += spec->next;
4978         }
4979         break;
4980     }
4981     default:
4982         ret = -TARGET_EINVAL;
4983         unlock_user(argptr, guest_data, 0);
4984         goto out;
4985     }
4986     unlock_user(argptr, guest_data, 0);
4987 
4988     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4989     if (!is_error(ret)) {
4990         guest_data = arg + host_dm->data_start;
4991         guest_data_size = host_dm->data_size - host_dm->data_start;
4992         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
             if (!argptr) {
                 ret = -TARGET_EFAULT;
                 goto out;
             }
4993         switch (ie->host_cmd) {
4994         case DM_REMOVE_ALL:
4995         case DM_DEV_CREATE:
4996         case DM_DEV_REMOVE:
4997         case DM_DEV_RENAME:
4998         case DM_DEV_SUSPEND:
4999         case DM_DEV_STATUS:
5000         case DM_TABLE_LOAD:
5001         case DM_TABLE_CLEAR:
5002         case DM_TARGET_MSG:
5003         case DM_DEV_SET_GEOMETRY:
5004             /* no return data */
5005             break;
5006         case DM_LIST_DEVICES:
5007         {
5008             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5009             uint32_t remaining_data = guest_data_size;
5010             void *cur_data = argptr;
5011             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5012             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5013 
5014             while (1) {
5015                 uint32_t next = nl->next;
5016                 if (next) {
5017                     nl->next = nl_size + (strlen(nl->name) + 1);
5018                 }
5019                 if (remaining_data < nl->next) {
5020                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5021                     break;
5022                 }
5023                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5024                 strcpy(cur_data + nl_size, nl->name);
5025                 cur_data += nl->next;
5026                 remaining_data -= nl->next;
5027                 if (!next) {
5028                     break;
5029                 }
5030                 nl = (void*)nl + next;
5031             }
5032             break;
5033         }
5034         case DM_DEV_WAIT:
5035         case DM_TABLE_STATUS:
5036         {
5037             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5038             void *cur_data = argptr;
5039             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5040             int spec_size = thunk_type_size(arg_type, 0);
5041             int i;
5042 
5043             for (i = 0; i < host_dm->target_count; i++) {
5044                 uint32_t next = spec->next;
5045                 int slen = strlen((char*)&spec[1]) + 1;
5046                 spec->next = (cur_data - argptr) + spec_size + slen;
5047                 if (guest_data_size < spec->next) {
5048                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5049                     break;
5050                 }
5051                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5052                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5053                 cur_data = argptr + spec->next;
5054                 spec = (void*)host_dm + host_dm->data_start + next;
5055             }
5056             break;
5057         }
5058         case DM_TABLE_DEPS:
5059         {
5060             void *hdata = (void*)host_dm + host_dm->data_start;
5061             int count = *(uint32_t*)hdata;
5062             uint64_t *hdev = hdata + 8;
5063             uint64_t *gdev = argptr + 8;
5064             int i;
5065 
5066             *(uint32_t*)argptr = tswap32(count);
5067             for (i = 0; i < count; i++) {
5068                 *gdev = tswap64(*hdev);
5069                 gdev++;
5070                 hdev++;
5071             }
5072             break;
5073         }
5074         case DM_LIST_VERSIONS:
5075         {
5076             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5077             uint32_t remaining_data = guest_data_size;
5078             void *cur_data = argptr;
5079             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5080             int vers_size = thunk_type_size(arg_type, 0);
5081 
5082             while (1) {
5083                 uint32_t next = vers->next;
5084                 if (next) {
5085                     vers->next = vers_size + (strlen(vers->name) + 1);
5086                 }
5087                 if (remaining_data < vers->next) {
5088                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5089                     break;
5090                 }
5091                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5092                 strcpy(cur_data + vers_size, vers->name);
5093                 cur_data += vers->next;
5094                 remaining_data -= vers->next;
5095                 if (!next) {
5096                     break;
5097                 }
5098                 vers = (void*)vers + next;
5099             }
5100             break;
5101         }
5102         default:
5103             unlock_user(argptr, guest_data, 0);
5104             ret = -TARGET_EINVAL;
5105             goto out;
5106         }
5107         unlock_user(argptr, guest_data, guest_data_size);
5108 
5109         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5110         if (!argptr) {
5111             ret = -TARGET_EFAULT;
5112             goto out;
5113         }
5114         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5115         unlock_user(argptr, arg, target_size);
5116     }
5117 out:
5118     g_free(big_buf);
5119     return ret;
5120 }
5121 
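/*
 * BLKPG takes a struct blkpg_ioctl_arg whose data member points at a
 * struct blkpg_partition.  Both levels are converted here: the outer
 * struct lands in buf_temp, the partition payload is read from the guest
 * address in ->data, and the host pointer is swizzled to a local copy
 * before calling the kernel.
 */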
5122 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5123                                int cmd, abi_long arg)
5124 {
5125     void *argptr;
5126     int target_size;
5127     const argtype *arg_type = ie->arg_type;
5128     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5129     abi_long ret;
5130 
5131     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5132     struct blkpg_partition host_part;
5133 
5134     /* Read and convert blkpg */
5135     arg_type++;
5136     target_size = thunk_type_size(arg_type, 0);
5137     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5138     if (!argptr) {
5139         ret = -TARGET_EFAULT;
5140         goto out;
5141     }
5142     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5143     unlock_user(argptr, arg, 0);
5144 
5145     switch (host_blkpg->op) {
5146     case BLKPG_ADD_PARTITION:
5147     case BLKPG_DEL_PARTITION:
5148         /* payload is struct blkpg_partition */
5149         break;
5150     default:
5151         /* Unknown opcode */
5152         ret = -TARGET_EINVAL;
5153         goto out;
5154     }
5155 
5156     /* Read and convert blkpg->data */
5157     arg = (abi_long)(uintptr_t)host_blkpg->data;
5158     target_size = thunk_type_size(part_arg_type, 0);
5159     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5160     if (!argptr) {
5161         ret = -TARGET_EFAULT;
5162         goto out;
5163     }
5164     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5165     unlock_user(argptr, arg, 0);
5166 
5167     /* Swizzle the data pointer to our local copy and call! */
5168     host_blkpg->data = &host_part;
5169     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5170 
5171 out:
5172     return ret;
5173 }
5174 
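/*
 * SIOCADDRT/SIOCDELRT pass a struct rtentry containing an rt_dev pointer
 * to a device-name string in guest memory.  The generic thunk cannot
 * follow that pointer, so the struct is converted field by field here and
 * rt_dev is locked as a guest string for the duration of the ioctl.
 */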
5175 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5176                                 int fd, int cmd, abi_long arg)
5177 {
5178     const argtype *arg_type = ie->arg_type;
5179     const StructEntry *se;
5180     const argtype *field_types;
5181     const int *dst_offsets, *src_offsets;
5182     int target_size;
5183     void *argptr;
5184     abi_ulong *target_rt_dev_ptr = NULL;
5185     unsigned long *host_rt_dev_ptr = NULL;
5186     abi_long ret;
5187     int i;
5188 
5189     assert(ie->access == IOC_W);
5190     assert(*arg_type == TYPE_PTR);
5191     arg_type++;
5192     assert(*arg_type == TYPE_STRUCT);
5193     target_size = thunk_type_size(arg_type, 0);
5194     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5195     if (!argptr) {
5196         return -TARGET_EFAULT;
5197     }
5198     arg_type++;
5199     assert(*arg_type == (int)STRUCT_rtentry);
5200     se = struct_entries + *arg_type++;
5201     assert(se->convert[0] == NULL);
5202     /* convert struct here to be able to catch rt_dev string */
5203     field_types = se->field_types;
5204     dst_offsets = se->field_offsets[THUNK_HOST];
5205     src_offsets = se->field_offsets[THUNK_TARGET];
5206     for (i = 0; i < se->nb_fields; i++) {
5207         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5208             assert(*field_types == TYPE_PTRVOID);
5209             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5210             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5211             if (*target_rt_dev_ptr != 0) {
5212                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5213                                                   tswapal(*target_rt_dev_ptr));
5214                 if (!*host_rt_dev_ptr) {
5215                     unlock_user(argptr, arg, 0);
5216                     return -TARGET_EFAULT;
5217                 }
5218             } else {
5219                 *host_rt_dev_ptr = 0;
5220             }
5221             field_types++;
5222             continue;
5223         }
5224         field_types = thunk_convert(buf_temp + dst_offsets[i],
5225                                     argptr + src_offsets[i],
5226                                     field_types, THUNK_HOST);
5227     }
5228     unlock_user(argptr, arg, 0);
5229 
5230     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5231 
5232     assert(host_rt_dev_ptr != NULL);
5233     assert(target_rt_dev_ptr != NULL);
5234     if (*host_rt_dev_ptr != 0) {
5235         unlock_user((void *)*host_rt_dev_ptr,
5236                     *target_rt_dev_ptr, 0);
5237     }
5238     return ret;
5239 }
5240 
5241 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5242                                      int fd, int cmd, abi_long arg)
5243 {
5244     int sig = target_to_host_signal(arg);
5245     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5246 }
5247 
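/*
 * SIOCGSTAMP/SIOCGSTAMPNS exist in an "_OLD" flavour using the target's
 * traditional timeval/timespec layout and a newer flavour using 64-bit
 * times; both are serviced by the same host ioctl and only differ in how
 * the result is copied back to the guest.
 */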
5248 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5249                                     int fd, int cmd, abi_long arg)
5250 {
5251     struct timeval tv;
5252     abi_long ret;
5253 
5254     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5255     if (is_error(ret)) {
5256         return ret;
5257     }
5258 
5259     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5260         if (copy_to_user_timeval(arg, &tv)) {
5261             return -TARGET_EFAULT;
5262         }
5263     } else {
5264         if (copy_to_user_timeval64(arg, &tv)) {
5265             return -TARGET_EFAULT;
5266         }
5267     }
5268 
5269     return ret;
5270 }
5271 
5272 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5273                                       int fd, int cmd, abi_long arg)
5274 {
5275     struct timespec ts;
5276     abi_long ret;
5277 
5278     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5279     if (is_error(ret)) {
5280         return ret;
5281     }
5282 
5283     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5284         if (host_to_target_timespec(arg, &ts)) {
5285             return -TARGET_EFAULT;
5286         }
5287     } else {
5288         if (host_to_target_timespec64(arg, &ts)) {
5289             return -TARGET_EFAULT;
5290         }
5291     }
5292 
5293     return ret;
5294 }
5295 
5296 #ifdef TIOCGPTPEER
5297 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5298                                      int fd, int cmd, abi_long arg)
5299 {
5300     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5301     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5302 }
5303 #endif
5304 
5305 #ifdef HAVE_DRM_H
5306 
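/*
 * DRM_IOCTL_VERSION fills caller-supplied buffers for the driver name,
 * date and description.  The helpers below lock those guest buffers so
 * the kernel can write into them directly, then copy the returned
 * lengths back and unlock (copying the data only on success).
 */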
5307 static void unlock_drm_version(struct drm_version *host_ver,
5308                                struct target_drm_version *target_ver,
5309                                bool copy)
5310 {
5311     unlock_user(host_ver->name, target_ver->name,
5312                                 copy ? host_ver->name_len : 0);
5313     unlock_user(host_ver->date, target_ver->date,
5314                                 copy ? host_ver->date_len : 0);
5315     unlock_user(host_ver->desc, target_ver->desc,
5316                                 copy ? host_ver->desc_len : 0);
5317 }
5318 
5319 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5320                                           struct target_drm_version *target_ver)
5321 {
5322     memset(host_ver, 0, sizeof(*host_ver));
5323 
5324     __get_user(host_ver->name_len, &target_ver->name_len);
5325     if (host_ver->name_len) {
5326         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5327                                    target_ver->name_len, 0);
5328         if (!host_ver->name) {
5329             return -EFAULT;
5330         }
5331     }
5332 
5333     __get_user(host_ver->date_len, &target_ver->date_len);
5334     if (host_ver->date_len) {
5335         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5336                                    target_ver->date_len, 0);
5337         if (!host_ver->date) {
5338             goto err;
5339         }
5340     }
5341 
5342     __get_user(host_ver->desc_len, &target_ver->desc_len);
5343     if (host_ver->desc_len) {
5344         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5345                                    target_ver->desc_len, 0);
5346         if (!host_ver->desc) {
5347             goto err;
5348         }
5349     }
5350 
5351     return 0;
5352 err:
5353     unlock_drm_version(host_ver, target_ver, false);
5354     return -EFAULT;
5355 }
5356 
5357 static inline void host_to_target_drmversion(
5358                                           struct target_drm_version *target_ver,
5359                                           struct drm_version *host_ver)
5360 {
5361     __put_user(host_ver->version_major, &target_ver->version_major);
5362     __put_user(host_ver->version_minor, &target_ver->version_minor);
5363     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5364     __put_user(host_ver->name_len, &target_ver->name_len);
5365     __put_user(host_ver->date_len, &target_ver->date_len);
5366     __put_user(host_ver->desc_len, &target_ver->desc_len);
5367     unlock_drm_version(host_ver, target_ver, true);
5368 }
5369 
5370 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5371                              int fd, int cmd, abi_long arg)
5372 {
5373     struct drm_version *ver;
5374     struct target_drm_version *target_ver;
5375     abi_long ret;
5376 
5377     switch (ie->host_cmd) {
5378     case DRM_IOCTL_VERSION:
5379         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5380             return -TARGET_EFAULT;
5381         }
5382         ver = (struct drm_version *)buf_temp;
5383         ret = target_to_host_drmversion(ver, target_ver);
5384         if (!is_error(ret)) {
5385             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5386             if (is_error(ret)) {
5387                 unlock_drm_version(ver, target_ver, false);
5388             } else {
5389                 host_to_target_drmversion(target_ver, ver);
5390             }
5391         }
5392         unlock_user_struct(target_ver, arg, 0);
5393         return ret;
5394     }
5395     return -TARGET_ENOSYS;
5396 }
5397 
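/*
 * DRM_IOCTL_I915_GETPARAM: the guest struct holds a parameter number and
 * a pointer to an int result.  The handler points the host struct at a
 * local int and copies the value back to the guest pointer afterwards.
 */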
5398 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5399                                            struct drm_i915_getparam *gparam,
5400                                            int fd, abi_long arg)
5401 {
5402     abi_long ret;
5403     int value;
5404     struct target_drm_i915_getparam *target_gparam;
5405 
5406     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5407         return -TARGET_EFAULT;
5408     }
5409 
5410     __get_user(gparam->param, &target_gparam->param);
5411     gparam->value = &value;
5412     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5413     put_user_s32(value, target_gparam->value);
5414 
5415     unlock_user_struct(target_gparam, arg, 0);
5416     return ret;
5417 }
5418 
5419 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5420                                   int fd, int cmd, abi_long arg)
5421 {
5422     switch (ie->host_cmd) {
5423     case DRM_IOCTL_I915_GETPARAM:
5424         return do_ioctl_drm_i915_getparam(ie,
5425                                           (struct drm_i915_getparam *)buf_temp,
5426                                           fd, arg);
5427     default:
5428         return -TARGET_ENOSYS;
5429     }
5430 }
5431 
5432 #endif
5433 
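/*
 * The ioctl table is generated from ioctls.h: IOCTL() adds a plain entry
 * whose argument is described by thunk argtype codes, IOCTL_SPECIAL()
 * attaches one of the do_ioctl_*() helpers above, and IOCTL_IGNORE()
 * records a target command with no host equivalent.  For example, an
 * entry like IOCTL(FIONREAD, IOC_R, MK_PTR(TYPE_INT)) in ioctls.h would
 * expand to roughly (illustrative only):
 *   { TARGET_FIONREAD, FIONREAD, "FIONREAD", IOC_R, 0,
 *     { MK_PTR(TYPE_INT) } },
 */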
5434 IOCTLEntry ioctl_entries[] = {
5435 #define IOCTL(cmd, access, ...) \
5436     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5437 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5438     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5439 #define IOCTL_IGNORE(cmd) \
5440     { TARGET_ ## cmd, 0, #cmd },
5441 #include "ioctls.h"
5442     { 0, 0, },
5443 };
5444 
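/*
 * do_ioctl() below walks the table linearly looking for the target
 * command.  An entry with a do_ioctl handler is delegated to it;
 * otherwise integer-style arguments are passed straight through, while
 * TYPE_PTR arguments are converted through buf_temp according to the
 * entry's IOC_R/IOC_W/IOC_RW access mode.
 */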
5445 /* ??? Implement proper locking for ioctls.  */
5446 /* do_ioctl() must return target values and target errnos. */
5447 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5448 {
5449     const IOCTLEntry *ie;
5450     const argtype *arg_type;
5451     abi_long ret;
5452     uint8_t buf_temp[MAX_STRUCT_SIZE];
5453     int target_size;
5454     void *argptr;
5455 
5456     ie = ioctl_entries;
5457     for(;;) {
5458         if (ie->target_cmd == 0) {
5459             qemu_log_mask(
5460                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5461             return -TARGET_ENOSYS;
5462         }
5463         if (ie->target_cmd == cmd)
5464             break;
5465         ie++;
5466     }
5467     arg_type = ie->arg_type;
5468     if (ie->do_ioctl) {
5469         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5470     } else if (!ie->host_cmd) {
5471         /* Some architectures define BSD ioctls in their headers
5472            that are not implemented in Linux.  */
5473         return -TARGET_ENOSYS;
5474     }
5475 
5476     switch(arg_type[0]) {
5477     case TYPE_NULL:
5478         /* no argument */
5479         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5480         break;
5481     case TYPE_PTRVOID:
5482     case TYPE_INT:
5483     case TYPE_LONG:
5484     case TYPE_ULONG:
5485         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5486         break;
5487     case TYPE_PTR:
5488         arg_type++;
5489         target_size = thunk_type_size(arg_type, 0);
5490         switch(ie->access) {
5491         case IOC_R:
5492             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5493             if (!is_error(ret)) {
5494                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5495                 if (!argptr)
5496                     return -TARGET_EFAULT;
5497                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5498                 unlock_user(argptr, arg, target_size);
5499             }
5500             break;
5501         case IOC_W:
5502             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5503             if (!argptr)
5504                 return -TARGET_EFAULT;
5505             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5506             unlock_user(argptr, arg, 0);
5507             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5508             break;
5509         default:
5510         case IOC_RW:
5511             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5512             if (!argptr)
5513                 return -TARGET_EFAULT;
5514             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5515             unlock_user(argptr, arg, 0);
5516             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5517             if (!is_error(ret)) {
5518                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5519                 if (!argptr)
5520                     return -TARGET_EFAULT;
5521                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5522                 unlock_user(argptr, arg, target_size);
5523             }
5524             break;
5525         }
5526         break;
5527     default:
5528         qemu_log_mask(LOG_UNIMP,
5529                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5530                       (long)cmd, arg_type[0]);
5531         ret = -TARGET_ENOSYS;
5532         break;
5533     }
5534     return ret;
5535 }
5536 
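/*
 * termios flag translation tables: each bitmask_transtbl entry gives a
 * (mask, value) pair on the target side and the matching pair on the
 * host side, so target_to_host_bitmask()/host_to_target_bitmask() can
 * translate both single-bit flags and multi-bit fields such as CBAUD or
 * CSIZE.
 */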
5537 static const bitmask_transtbl iflag_tbl[] = {
5538         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5539         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5540         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5541         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5542         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5543         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5544         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5545         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5546         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5547         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5548         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5549         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5550         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5551         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5552         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5553         { 0, 0, 0, 0 }
5554 };
5555 
5556 static const bitmask_transtbl oflag_tbl[] = {
5557 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5558 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5559 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5560 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5561 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5562 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5563 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5564 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5565 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5566 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5567 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5568 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5569 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5570 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5571 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5572 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5573 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5574 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5575 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5576 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5577 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5578 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5579 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5580 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5581 	{ 0, 0, 0, 0 }
5582 };
5583 
5584 static const bitmask_transtbl cflag_tbl[] = {
5585 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5586 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5587 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5588 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5589 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5590 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5591 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5592 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5593 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5594 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5595 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5596 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5597 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5598 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5599 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5600 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5601 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5602 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5603 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5604 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5605 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5606 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5607 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5608 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5609 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5610 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5611 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5612 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5613 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5614 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5615 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5616 	{ 0, 0, 0, 0 }
5617 };
5618 
5619 static const bitmask_transtbl lflag_tbl[] = {
5620   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5621   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5622   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5623   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5624   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5625   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5626   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5627   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5628   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5629   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5630   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5631   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5632   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5633   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5634   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5635   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5636   { 0, 0, 0, 0 }
5637 };
5638 
5639 static void target_to_host_termios (void *dst, const void *src)
5640 {
5641     struct host_termios *host = dst;
5642     const struct target_termios *target = src;
5643 
5644     host->c_iflag =
5645         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5646     host->c_oflag =
5647         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5648     host->c_cflag =
5649         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5650     host->c_lflag =
5651         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5652     host->c_line = target->c_line;
5653 
5654     memset(host->c_cc, 0, sizeof(host->c_cc));
5655     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5656     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5657     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5658     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5659     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5660     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5661     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5662     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5663     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5664     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5665     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5666     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5667     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5668     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5669     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5670     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5671     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5672 }
5673 
5674 static void host_to_target_termios (void *dst, const void *src)
5675 {
5676     struct target_termios *target = dst;
5677     const struct host_termios *host = src;
5678 
5679     target->c_iflag =
5680         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5681     target->c_oflag =
5682         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5683     target->c_cflag =
5684         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5685     target->c_lflag =
5686         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5687     target->c_line = host->c_line;
5688 
5689     memset(target->c_cc, 0, sizeof(target->c_cc));
5690     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5691     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5692     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5693     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5694     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5695     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5696     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5697     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5698     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5699     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5700     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5701     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5702     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5703     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5704     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5705     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5706     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5707 }
5708 
5709 static const StructEntry struct_termios_def = {
5710     .convert = { host_to_target_termios, target_to_host_termios },
5711     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5712     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5713     .print = print_termios,
5714 };
5715 
5716 static bitmask_transtbl mmap_flags_tbl[] = {
5717     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5718     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5719     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5720     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5721       MAP_ANONYMOUS, MAP_ANONYMOUS },
5722     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5723       MAP_GROWSDOWN, MAP_GROWSDOWN },
5724     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5725       MAP_DENYWRITE, MAP_DENYWRITE },
5726     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5727       MAP_EXECUTABLE, MAP_EXECUTABLE },
5728     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5729     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5730       MAP_NORESERVE, MAP_NORESERVE },
5731     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5732     /* MAP_STACK had been ignored by the kernel for quite some time.
5733        Recognize it for the target insofar as we do not want to pass
5734        it through to the host.  */
5735     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5736     { 0, 0, 0, 0 }
5737 };
5738 
5739 /*
5740  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5741  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5742  */
5743 #if defined(TARGET_I386)
5744 
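/*
 * modify_ldt() emulation: the guest-visible LDT is kept as an array of
 * 8-byte descriptors in guest memory (ldt_table below, mapped on first
 * use).  write_ldt() encodes target_modify_ldt_ldt_s requests into the
 * descriptor format the same way the kernel does; read_ldt() simply
 * copies the raw table back to the guest.
 */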
5745 /* NOTE: there is really only one LDT shared by all the threads */
5746 static uint8_t *ldt_table;
5747 
5748 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5749 {
5750     int size;
5751     void *p;
5752 
5753     if (!ldt_table)
5754         return 0;
5755     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5756     if (size > bytecount)
5757         size = bytecount;
5758     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5759     if (!p)
5760         return -TARGET_EFAULT;
5761     /* ??? Should this by byteswapped?  */
5762     /* ??? Should this be byteswapped?  */
5763     unlock_user(p, ptr, size);
5764     return size;
5765 }
5766 
5767 /* XXX: add locking support */
5768 static abi_long write_ldt(CPUX86State *env,
5769                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5770 {
5771     struct target_modify_ldt_ldt_s ldt_info;
5772     struct target_modify_ldt_ldt_s *target_ldt_info;
5773     int seg_32bit, contents, read_exec_only, limit_in_pages;
5774     int seg_not_present, useable, lm;
5775     uint32_t *lp, entry_1, entry_2;
5776 
5777     if (bytecount != sizeof(ldt_info))
5778         return -TARGET_EINVAL;
5779     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5780         return -TARGET_EFAULT;
5781     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5782     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5783     ldt_info.limit = tswap32(target_ldt_info->limit);
5784     ldt_info.flags = tswap32(target_ldt_info->flags);
5785     unlock_user_struct(target_ldt_info, ptr, 0);
5786 
5787     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5788         return -TARGET_EINVAL;
5789     seg_32bit = ldt_info.flags & 1;
5790     contents = (ldt_info.flags >> 1) & 3;
5791     read_exec_only = (ldt_info.flags >> 3) & 1;
5792     limit_in_pages = (ldt_info.flags >> 4) & 1;
5793     seg_not_present = (ldt_info.flags >> 5) & 1;
5794     useable = (ldt_info.flags >> 6) & 1;
5795 #ifdef TARGET_ABI32
5796     lm = 0;
5797 #else
5798     lm = (ldt_info.flags >> 7) & 1;
5799 #endif
5800     if (contents == 3) {
5801         if (oldmode)
5802             return -TARGET_EINVAL;
5803         if (seg_not_present == 0)
5804             return -TARGET_EINVAL;
5805     }
5806     /* allocate the LDT */
5807     if (!ldt_table) {
5808         env->ldt.base = target_mmap(0,
5809                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5810                                     PROT_READ|PROT_WRITE,
5811                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5812         if (env->ldt.base == -1)
5813             return -TARGET_ENOMEM;
5814         memset(g2h(env->ldt.base), 0,
5815                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5816         env->ldt.limit = 0xffff;
5817         ldt_table = g2h(env->ldt.base);
5818     }
5819 
5820     /* NOTE: same code as Linux kernel */
5821     /* Allow LDTs to be cleared by the user. */
5822     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5823         if (oldmode ||
5824             (contents == 0		&&
5825              read_exec_only == 1	&&
5826              seg_32bit == 0		&&
5827              limit_in_pages == 0	&&
5828              seg_not_present == 1	&&
5829              useable == 0 )) {
5830             entry_1 = 0;
5831             entry_2 = 0;
5832             goto install;
5833         }
5834     }
5835 
5836     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5837         (ldt_info.limit & 0x0ffff);
5838     entry_2 = (ldt_info.base_addr & 0xff000000) |
5839         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5840         (ldt_info.limit & 0xf0000) |
5841         ((read_exec_only ^ 1) << 9) |
5842         (contents << 10) |
5843         ((seg_not_present ^ 1) << 15) |
5844         (seg_32bit << 22) |
5845         (limit_in_pages << 23) |
5846         (lm << 21) |
5847         0x7000;
5848     if (!oldmode)
5849         entry_2 |= (useable << 20);
5850 
5851     /* Install the new entry ...  */
5852 install:
5853     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5854     lp[0] = tswap32(entry_1);
5855     lp[1] = tswap32(entry_2);
5856     return 0;
5857 }
5858 
5859 /* specific and weird i386 syscalls */
5860 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5861                               unsigned long bytecount)
5862 {
5863     abi_long ret;
5864 
5865     switch (func) {
5866     case 0:
5867         ret = read_ldt(ptr, bytecount);
5868         break;
5869     case 1:
5870         ret = write_ldt(env, ptr, bytecount, 1);
5871         break;
5872     case 0x11:
5873         ret = write_ldt(env, ptr, bytecount, 0);
5874         break;
5875     default:
5876         ret = -TARGET_ENOSYS;
5877         break;
5878     }
5879     return ret;
5880 }
5881 
5882 #if defined(TARGET_ABI32)
5883 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5884 {
5885     uint64_t *gdt_table = g2h(env->gdt.base);
5886     struct target_modify_ldt_ldt_s ldt_info;
5887     struct target_modify_ldt_ldt_s *target_ldt_info;
5888     int seg_32bit, contents, read_exec_only, limit_in_pages;
5889     int seg_not_present, useable, lm;
5890     uint32_t *lp, entry_1, entry_2;
5891     int i;
5892 
5893     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5894     if (!target_ldt_info)
5895         return -TARGET_EFAULT;
5896     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5897     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5898     ldt_info.limit = tswap32(target_ldt_info->limit);
5899     ldt_info.flags = tswap32(target_ldt_info->flags);
5900     if (ldt_info.entry_number == -1) {
5901         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5902             if (gdt_table[i] == 0) {
5903                 ldt_info.entry_number = i;
5904                 target_ldt_info->entry_number = tswap32(i);
5905                 break;
5906             }
5907         }
5908     }
5909     unlock_user_struct(target_ldt_info, ptr, 1);
5910 
5911     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5912         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5913            return -TARGET_EINVAL;
5914     seg_32bit = ldt_info.flags & 1;
5915     contents = (ldt_info.flags >> 1) & 3;
5916     read_exec_only = (ldt_info.flags >> 3) & 1;
5917     limit_in_pages = (ldt_info.flags >> 4) & 1;
5918     seg_not_present = (ldt_info.flags >> 5) & 1;
5919     useable = (ldt_info.flags >> 6) & 1;
5920 #ifdef TARGET_ABI32
5921     lm = 0;
5922 #else
5923     lm = (ldt_info.flags >> 7) & 1;
5924 #endif
5925 
5926     if (contents == 3) {
5927         if (seg_not_present == 0)
5928             return -TARGET_EINVAL;
5929     }
5930 
5931     /* NOTE: same code as Linux kernel */
5932     /* Allow LDTs to be cleared by the user. */
5933     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5934         if ((contents == 0             &&
5935              read_exec_only == 1       &&
5936              seg_32bit == 0            &&
5937              limit_in_pages == 0       &&
5938              seg_not_present == 1      &&
5939              useable == 0 )) {
5940             entry_1 = 0;
5941             entry_2 = 0;
5942             goto install;
5943         }
5944     }
5945 
5946     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5947         (ldt_info.limit & 0x0ffff);
5948     entry_2 = (ldt_info.base_addr & 0xff000000) |
5949         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5950         (ldt_info.limit & 0xf0000) |
5951         ((read_exec_only ^ 1) << 9) |
5952         (contents << 10) |
5953         ((seg_not_present ^ 1) << 15) |
5954         (seg_32bit << 22) |
5955         (limit_in_pages << 23) |
5956         (useable << 20) |
5957         (lm << 21) |
5958         0x7000;
5959 
5960     /* Install the new entry ...  */
5961 install:
5962     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5963     lp[0] = tswap32(entry_1);
5964     lp[1] = tswap32(entry_2);
5965     return 0;
5966 }
5967 
5968 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5969 {
5970     struct target_modify_ldt_ldt_s *target_ldt_info;
5971     uint64_t *gdt_table = g2h(env->gdt.base);
5972     uint32_t base_addr, limit, flags;
5973     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5974     int seg_not_present, useable, lm;
5975     uint32_t *lp, entry_1, entry_2;
5976 
5977     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5978     if (!target_ldt_info)
5979         return -TARGET_EFAULT;
5980     idx = tswap32(target_ldt_info->entry_number);
5981     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5982         idx > TARGET_GDT_ENTRY_TLS_MAX) {
5983         unlock_user_struct(target_ldt_info, ptr, 1);
5984         return -TARGET_EINVAL;
5985     }
5986     lp = (uint32_t *)(gdt_table + idx);
5987     entry_1 = tswap32(lp[0]);
5988     entry_2 = tswap32(lp[1]);
5989 
5990     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5991     contents = (entry_2 >> 10) & 3;
5992     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5993     seg_32bit = (entry_2 >> 22) & 1;
5994     limit_in_pages = (entry_2 >> 23) & 1;
5995     useable = (entry_2 >> 20) & 1;
5996 #ifdef TARGET_ABI32
5997     lm = 0;
5998 #else
5999     lm = (entry_2 >> 21) & 1;
6000 #endif
6001     flags = (seg_32bit << 0) | (contents << 1) |
6002         (read_exec_only << 3) | (limit_in_pages << 4) |
6003         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6004     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6005     base_addr = (entry_1 >> 16) |
6006         (entry_2 & 0xff000000) |
6007         ((entry_2 & 0xff) << 16);
6008     target_ldt_info->base_addr = tswapal(base_addr);
6009     target_ldt_info->limit = tswap32(limit);
6010     target_ldt_info->flags = tswap32(flags);
6011     unlock_user_struct(target_ldt_info, ptr, 1);
6012     return 0;
6013 }
6014 
6015 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6016 {
6017     return -TARGET_ENOSYS;
6018 }
6019 #else
6020 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6021 {
6022     abi_long ret = 0;
6023     abi_ulong val;
6024     int idx;
6025 
6026     switch(code) {
6027     case TARGET_ARCH_SET_GS:
6028     case TARGET_ARCH_SET_FS:
6029         if (code == TARGET_ARCH_SET_GS)
6030             idx = R_GS;
6031         else
6032             idx = R_FS;
6033         cpu_x86_load_seg(env, idx, 0);
6034         env->segs[idx].base = addr;
6035         break;
6036     case TARGET_ARCH_GET_GS:
6037     case TARGET_ARCH_GET_FS:
6038         if (code == TARGET_ARCH_GET_GS)
6039             idx = R_GS;
6040         else
6041             idx = R_FS;
6042         val = env->segs[idx].base;
6043         if (put_user(val, addr, abi_ulong))
6044             ret = -TARGET_EFAULT;
6045         break;
6046     default:
6047         ret = -TARGET_EINVAL;
6048         break;
6049     }
6050     return ret;
6051 }
6052 #endif /* defined(TARGET_ABI32) */
6053 
6054 #endif /* defined(TARGET_I386) */
6055 
6056 #define NEW_STACK_SIZE 0x40000
6057 
6058 
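/*
 * CLONE_VM clones are implemented as host pthreads.  The parent fills a
 * new_thread_info on its stack, holds clone_lock while it finishes
 * setting up the child's CPU state, and waits on the condition variable
 * until clone_func() has published the child's tid; the child in turn
 * blocks on clone_lock so it only enters cpu_loop() once setup is done.
 */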
6059 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6060 typedef struct {
6061     CPUArchState *env;
6062     pthread_mutex_t mutex;
6063     pthread_cond_t cond;
6064     pthread_t thread;
6065     uint32_t tid;
6066     abi_ulong child_tidptr;
6067     abi_ulong parent_tidptr;
6068     sigset_t sigmask;
6069 } new_thread_info;
6070 
6071 static void *clone_func(void *arg)
6072 {
6073     new_thread_info *info = arg;
6074     CPUArchState *env;
6075     CPUState *cpu;
6076     TaskState *ts;
6077 
6078     rcu_register_thread();
6079     tcg_register_thread();
6080     env = info->env;
6081     cpu = env_cpu(env);
6082     thread_cpu = cpu;
6083     ts = (TaskState *)cpu->opaque;
6084     info->tid = sys_gettid();
6085     task_settid(ts);
6086     if (info->child_tidptr)
6087         put_user_u32(info->tid, info->child_tidptr);
6088     if (info->parent_tidptr)
6089         put_user_u32(info->tid, info->parent_tidptr);
6090     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6091     /* Enable signals.  */
6092     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6093     /* Signal to the parent that we're ready.  */
6094     pthread_mutex_lock(&info->mutex);
6095     pthread_cond_broadcast(&info->cond);
6096     pthread_mutex_unlock(&info->mutex);
6097     /* Wait until the parent has finished initializing the tls state.  */
6098     pthread_mutex_lock(&clone_lock);
6099     pthread_mutex_unlock(&clone_lock);
6100     cpu_loop(env);
6101     /* never exits */
6102     return NULL;
6103 }
6104 
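/*
 * do_fork() handles both flavours of clone: with CLONE_VM the request is
 * mapped onto a new host thread as described above; without it the guest
 * fork()/vfork() is serviced by a real host fork() (vfork is degraded to
 * fork by stripping CLONE_VFORK|CLONE_VM).
 */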
6105 /* do_fork() must return host values and target errnos (unlike most
6106    do_*() functions). */
6107 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6108                    abi_ulong parent_tidptr, target_ulong newtls,
6109                    abi_ulong child_tidptr)
6110 {
6111     CPUState *cpu = env_cpu(env);
6112     int ret;
6113     TaskState *ts;
6114     CPUState *new_cpu;
6115     CPUArchState *new_env;
6116     sigset_t sigmask;
6117 
6118     flags &= ~CLONE_IGNORED_FLAGS;
6119 
6120     /* Emulate vfork() with fork() */
6121     if (flags & CLONE_VFORK)
6122         flags &= ~(CLONE_VFORK | CLONE_VM);
6123 
6124     if (flags & CLONE_VM) {
6125         TaskState *parent_ts = (TaskState *)cpu->opaque;
6126         new_thread_info info;
6127         pthread_attr_t attr;
6128 
6129         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6130             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6131             return -TARGET_EINVAL;
6132         }
6133 
6134         ts = g_new0(TaskState, 1);
6135         init_task_state(ts);
6136 
6137         /* Grab a mutex so that thread setup appears atomic.  */
6138         pthread_mutex_lock(&clone_lock);
6139 
6140         /* Create a new CPU instance.  */
6141         new_env = cpu_copy(env);
6142         /* Init regs that differ from the parent.  */
6143         cpu_clone_regs_child(new_env, newsp, flags);
6144         cpu_clone_regs_parent(env, flags);
6145         new_cpu = env_cpu(new_env);
6146         new_cpu->opaque = ts;
6147         ts->bprm = parent_ts->bprm;
6148         ts->info = parent_ts->info;
6149         ts->signal_mask = parent_ts->signal_mask;
6150 
6151         if (flags & CLONE_CHILD_CLEARTID) {
6152             ts->child_tidptr = child_tidptr;
6153         }
6154 
6155         if (flags & CLONE_SETTLS) {
6156             cpu_set_tls(new_env, newtls);
6157         }
6158 
6159         memset(&info, 0, sizeof(info));
6160         pthread_mutex_init(&info.mutex, NULL);
6161         pthread_mutex_lock(&info.mutex);
6162         pthread_cond_init(&info.cond, NULL);
6163         info.env = new_env;
6164         if (flags & CLONE_CHILD_SETTID) {
6165             info.child_tidptr = child_tidptr;
6166         }
6167         if (flags & CLONE_PARENT_SETTID) {
6168             info.parent_tidptr = parent_tidptr;
6169         }
6170 
6171         ret = pthread_attr_init(&attr);
6172         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6173         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6174         /* It is not safe to deliver signals until the child has finished
6175            initializing, so temporarily block all signals.  */
6176         sigfillset(&sigmask);
6177         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6178         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6179 
6180         /* If this is our first additional thread, we need to ensure we
6181          * generate code for parallel execution and flush old translations.
6182          */
6183         if (!parallel_cpus) {
6184             parallel_cpus = true;
6185             tb_flush(cpu);
6186         }
6187 
6188         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6189         /* TODO: Free new CPU state if thread creation failed.  */
6190 
6191         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6192         pthread_attr_destroy(&attr);
6193         if (ret == 0) {
6194             /* Wait for the child to initialize.  */
6195             pthread_cond_wait(&info.cond, &info.mutex);
6196             ret = info.tid;
6197         } else {
6198             ret = -1;
6199         }
6200         pthread_mutex_unlock(&info.mutex);
6201         pthread_cond_destroy(&info.cond);
6202         pthread_mutex_destroy(&info.mutex);
6203         pthread_mutex_unlock(&clone_lock);
6204     } else {
6205         /* Without CLONE_VM, we treat it as a fork. */
6206         if (flags & CLONE_INVALID_FORK_FLAGS) {
6207             return -TARGET_EINVAL;
6208         }
6209 
6210         /* We can't support custom termination signals */
6211         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6212             return -TARGET_EINVAL;
6213         }
6214 
6215         if (block_signals()) {
6216             return -TARGET_ERESTARTSYS;
6217         }
6218 
6219         fork_start();
6220         ret = fork();
6221         if (ret == 0) {
6222             /* Child Process.  */
6223             cpu_clone_regs_child(env, newsp, flags);
6224             fork_end(1);
6225             /* There is a race condition here.  The parent process could
6226                theoretically read the TID in the child process before the child
6227                tid is set.  This would require using either ptrace
6228                (not implemented) or having *_tidptr point at a shared memory
6229                mapping.  We can't repeat the spinlock hack used above because
6230                the child process gets its own copy of the lock.  */
6231             if (flags & CLONE_CHILD_SETTID)
6232                 put_user_u32(sys_gettid(), child_tidptr);
6233             if (flags & CLONE_PARENT_SETTID)
6234                 put_user_u32(sys_gettid(), parent_tidptr);
6235             ts = (TaskState *)cpu->opaque;
6236             if (flags & CLONE_SETTLS)
6237                 cpu_set_tls(env, newtls);
6238             if (flags & CLONE_CHILD_CLEARTID)
6239                 ts->child_tidptr = child_tidptr;
6240         } else {
6241             cpu_clone_regs_parent(env, flags);
6242             fork_end(0);
6243         }
6244     }
6245     return ret;
6246 }
6247 
6248 /* Warning: does not handle Linux-specific flags. */
6249 static int target_to_host_fcntl_cmd(int cmd)
6250 {
6251     int ret;
6252 
6253     switch(cmd) {
6254     case TARGET_F_DUPFD:
6255     case TARGET_F_GETFD:
6256     case TARGET_F_SETFD:
6257     case TARGET_F_GETFL:
6258     case TARGET_F_SETFL:
6259     case TARGET_F_OFD_GETLK:
6260     case TARGET_F_OFD_SETLK:
6261     case TARGET_F_OFD_SETLKW:
6262         ret = cmd;
6263         break;
6264     case TARGET_F_GETLK:
6265         ret = F_GETLK64;
6266         break;
6267     case TARGET_F_SETLK:
6268         ret = F_SETLK64;
6269         break;
6270     case TARGET_F_SETLKW:
6271         ret = F_SETLKW64;
6272         break;
6273     case TARGET_F_GETOWN:
6274         ret = F_GETOWN;
6275         break;
6276     case TARGET_F_SETOWN:
6277         ret = F_SETOWN;
6278         break;
6279     case TARGET_F_GETSIG:
6280         ret = F_GETSIG;
6281         break;
6282     case TARGET_F_SETSIG:
6283         ret = F_SETSIG;
6284         break;
6285 #if TARGET_ABI_BITS == 32
6286     case TARGET_F_GETLK64:
6287         ret = F_GETLK64;
6288         break;
6289     case TARGET_F_SETLK64:
6290         ret = F_SETLK64;
6291         break;
6292     case TARGET_F_SETLKW64:
6293         ret = F_SETLKW64;
6294         break;
6295 #endif
6296     case TARGET_F_SETLEASE:
6297         ret = F_SETLEASE;
6298         break;
6299     case TARGET_F_GETLEASE:
6300         ret = F_GETLEASE;
6301         break;
6302 #ifdef F_DUPFD_CLOEXEC
6303     case TARGET_F_DUPFD_CLOEXEC:
6304         ret = F_DUPFD_CLOEXEC;
6305         break;
6306 #endif
6307     case TARGET_F_NOTIFY:
6308         ret = F_NOTIFY;
6309         break;
6310 #ifdef F_GETOWN_EX
6311     case TARGET_F_GETOWN_EX:
6312         ret = F_GETOWN_EX;
6313         break;
6314 #endif
6315 #ifdef F_SETOWN_EX
6316     case TARGET_F_SETOWN_EX:
6317         ret = F_SETOWN_EX;
6318         break;
6319 #endif
6320 #ifdef F_SETPIPE_SZ
6321     case TARGET_F_SETPIPE_SZ:
6322         ret = F_SETPIPE_SZ;
6323         break;
6324     case TARGET_F_GETPIPE_SZ:
6325         ret = F_GETPIPE_SZ;
6326         break;
6327 #endif
6328     default:
6329         ret = -TARGET_EINVAL;
6330         break;
6331     }
6332 
6333 #if defined(__powerpc64__)
6334     /* On PPC64, the glibc headers have the F_*LK* commands defined as 12, 13
6335      * and 14, which are not supported by the kernel. The glibc fcntl wrapper
6336      * adjusts them to 5, 6 and 7 before making the syscall. Since we make the
6337      * syscall directly, adjust to what the kernel supports.
6338      */
6339     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6340         ret -= F_GETLK64 - 5;
6341     }
6342 #endif
6343 
6344     return ret;
6345 }
6346 
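/*
 * FLOCK_TRANSTBL is expanded twice below with different TRANSTBL_CONVERT
 * definitions, generating both directions of the l_type conversion
 * (target_to_host_flock and host_to_target_flock) from a single list of
 * lock types.
 */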
6347 #define FLOCK_TRANSTBL \
6348     switch (type) { \
6349     TRANSTBL_CONVERT(F_RDLCK); \
6350     TRANSTBL_CONVERT(F_WRLCK); \
6351     TRANSTBL_CONVERT(F_UNLCK); \
6352     TRANSTBL_CONVERT(F_EXLCK); \
6353     TRANSTBL_CONVERT(F_SHLCK); \
6354     }
6355 
6356 static int target_to_host_flock(int type)
6357 {
6358 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6359     FLOCK_TRANSTBL
6360 #undef  TRANSTBL_CONVERT
6361     return -TARGET_EINVAL;
6362 }
6363 
6364 static int host_to_target_flock(int type)
6365 {
6366 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6367     FLOCK_TRANSTBL
6368 #undef  TRANSTBL_CONVERT
6369     /* If we don't know how to convert the value coming
6370      * from the host, copy it to the target field as-is.
6371      */
6372     return type;
6373 }
6374 
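/*
 * Three guest flock layouts have to be converted to and from the host's
 * struct flock64: the plain target_flock, the 64-bit target_flock64, and
 * the old-ABI (OABI) variant used by 32-bit Arm, which lays out its 64-bit
 * fields differently.
 */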
6375 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6376                                             abi_ulong target_flock_addr)
6377 {
6378     struct target_flock *target_fl;
6379     int l_type;
6380 
6381     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6382         return -TARGET_EFAULT;
6383     }
6384 
6385     __get_user(l_type, &target_fl->l_type);
6386     l_type = target_to_host_flock(l_type);
6387     if (l_type < 0) {
             /* don't leak the locked guest struct on the error path */
             unlock_user_struct(target_fl, target_flock_addr, 0);
6388         return l_type;
6389     }
6390     fl->l_type = l_type;
6391     __get_user(fl->l_whence, &target_fl->l_whence);
6392     __get_user(fl->l_start, &target_fl->l_start);
6393     __get_user(fl->l_len, &target_fl->l_len);
6394     __get_user(fl->l_pid, &target_fl->l_pid);
6395     unlock_user_struct(target_fl, target_flock_addr, 0);
6396     return 0;
6397 }
6398 
6399 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6400                                           const struct flock64 *fl)
6401 {
6402     struct target_flock *target_fl;
6403     short l_type;
6404 
6405     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6406         return -TARGET_EFAULT;
6407     }
6408 
6409     l_type = host_to_target_flock(fl->l_type);
6410     __put_user(l_type, &target_fl->l_type);
6411     __put_user(fl->l_whence, &target_fl->l_whence);
6412     __put_user(fl->l_start, &target_fl->l_start);
6413     __put_user(fl->l_len, &target_fl->l_len);
6414     __put_user(fl->l_pid, &target_fl->l_pid);
6415     unlock_user_struct(target_fl, target_flock_addr, 1);
6416     return 0;
6417 }
6418 
6419 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6420 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6421 
6422 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6423 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6424                                                    abi_ulong target_flock_addr)
6425 {
6426     struct target_oabi_flock64 *target_fl;
6427     int l_type;
6428 
6429     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6430         return -TARGET_EFAULT;
6431     }
6432 
6433     __get_user(l_type, &target_fl->l_type);
6434     l_type = target_to_host_flock(l_type);
6435     if (l_type < 0) {
             /* don't leak the locked guest struct on the error path */
             unlock_user_struct(target_fl, target_flock_addr, 0);
6436         return l_type;
6437     }
6438     fl->l_type = l_type;
6439     __get_user(fl->l_whence, &target_fl->l_whence);
6440     __get_user(fl->l_start, &target_fl->l_start);
6441     __get_user(fl->l_len, &target_fl->l_len);
6442     __get_user(fl->l_pid, &target_fl->l_pid);
6443     unlock_user_struct(target_fl, target_flock_addr, 0);
6444     return 0;
6445 }
6446 
6447 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6448                                                  const struct flock64 *fl)
6449 {
6450     struct target_oabi_flock64 *target_fl;
6451     short l_type;
6452 
6453     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6454         return -TARGET_EFAULT;
6455     }
6456 
6457     l_type = host_to_target_flock(fl->l_type);
6458     __put_user(l_type, &target_fl->l_type);
6459     __put_user(fl->l_whence, &target_fl->l_whence);
6460     __put_user(fl->l_start, &target_fl->l_start);
6461     __put_user(fl->l_len, &target_fl->l_len);
6462     __put_user(fl->l_pid, &target_fl->l_pid);
6463     unlock_user_struct(target_fl, target_flock_addr, 1);
6464     return 0;
6465 }
6466 #endif
6467 
6468 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6469                                               abi_ulong target_flock_addr)
6470 {
6471     struct target_flock64 *target_fl;
6472     int l_type;
6473 
6474     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6475         return -TARGET_EFAULT;
6476     }
6477 
6478     __get_user(l_type, &target_fl->l_type);
6479     l_type = target_to_host_flock(l_type);
6480     if (l_type < 0) {
             /* don't leak the locked guest struct on the error path */
             unlock_user_struct(target_fl, target_flock_addr, 0);
6481         return l_type;
6482     }
6483     fl->l_type = l_type;
6484     __get_user(fl->l_whence, &target_fl->l_whence);
6485     __get_user(fl->l_start, &target_fl->l_start);
6486     __get_user(fl->l_len, &target_fl->l_len);
6487     __get_user(fl->l_pid, &target_fl->l_pid);
6488     unlock_user_struct(target_fl, target_flock_addr, 0);
6489     return 0;
6490 }
6491 
6492 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6493                                             const struct flock64 *fl)
6494 {
6495     struct target_flock64 *target_fl;
6496     short l_type;
6497 
6498     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6499         return -TARGET_EFAULT;
6500     }
6501 
6502     l_type = host_to_target_flock(fl->l_type);
6503     __put_user(l_type, &target_fl->l_type);
6504     __put_user(fl->l_whence, &target_fl->l_whence);
6505     __put_user(fl->l_start, &target_fl->l_start);
6506     __put_user(fl->l_len, &target_fl->l_len);
6507     __put_user(fl->l_pid, &target_fl->l_pid);
6508     unlock_user_struct(target_fl, target_flock_addr, 1);
6509     return 0;
6510 }
6511 
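/*
 * do_fcntl(): translate the guest fcntl command to the host one, convert
 * flock/flock64 and f_owner_ex structures between guest and host layout,
 * and map file status flags through fcntl_flags_tbl in both directions.
 */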
6512 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6513 {
6514     struct flock64 fl64;
6515 #ifdef F_GETOWN_EX
6516     struct f_owner_ex fox;
6517     struct target_f_owner_ex *target_fox;
6518 #endif
6519     abi_long ret;
6520     int host_cmd = target_to_host_fcntl_cmd(cmd);
6521 
6522     if (host_cmd == -TARGET_EINVAL)
6523         return host_cmd;
6524 
6525     switch(cmd) {
6526     case TARGET_F_GETLK:
6527         ret = copy_from_user_flock(&fl64, arg);
6528         if (ret) {
6529             return ret;
6530         }
6531         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6532         if (ret == 0) {
6533             ret = copy_to_user_flock(arg, &fl64);
6534         }
6535         break;
6536 
6537     case TARGET_F_SETLK:
6538     case TARGET_F_SETLKW:
6539         ret = copy_from_user_flock(&fl64, arg);
6540         if (ret) {
6541             return ret;
6542         }
6543         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6544         break;
6545 
6546     case TARGET_F_GETLK64:
6547     case TARGET_F_OFD_GETLK:
6548         ret = copy_from_user_flock64(&fl64, arg);
6549         if (ret) {
6550             return ret;
6551         }
6552         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6553         if (ret == 0) {
6554             ret = copy_to_user_flock64(arg, &fl64);
6555         }
6556         break;
6557     case TARGET_F_SETLK64:
6558     case TARGET_F_SETLKW64:
6559     case TARGET_F_OFD_SETLK:
6560     case TARGET_F_OFD_SETLKW:
6561         ret = copy_from_user_flock64(&fl64, arg);
6562         if (ret) {
6563             return ret;
6564         }
6565         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6566         break;
6567 
6568     case TARGET_F_GETFL:
6569         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6570         if (ret >= 0) {
6571             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6572         }
6573         break;
6574 
6575     case TARGET_F_SETFL:
6576         ret = get_errno(safe_fcntl(fd, host_cmd,
6577                                    target_to_host_bitmask(arg,
6578                                                           fcntl_flags_tbl)));
6579         break;
6580 
6581 #ifdef F_GETOWN_EX
6582     case TARGET_F_GETOWN_EX:
6583         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6584         if (ret >= 0) {
6585             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6586                 return -TARGET_EFAULT;
6587             target_fox->type = tswap32(fox.type);
6588             target_fox->pid = tswap32(fox.pid);
6589             unlock_user_struct(target_fox, arg, 1);
6590         }
6591         break;
6592 #endif
6593 
6594 #ifdef F_SETOWN_EX
6595     case TARGET_F_SETOWN_EX:
6596         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6597             return -TARGET_EFAULT;
6598         fox.type = tswap32(target_fox->type);
6599         fox.pid = tswap32(target_fox->pid);
6600         unlock_user_struct(target_fox, arg, 0);
6601         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6602         break;
6603 #endif
6604 
6605     case TARGET_F_SETOWN:
6606     case TARGET_F_GETOWN:
6607     case TARGET_F_SETSIG:
6608     case TARGET_F_GETSIG:
6609     case TARGET_F_SETLEASE:
6610     case TARGET_F_GETLEASE:
6611     case TARGET_F_SETPIPE_SZ:
6612     case TARGET_F_GETPIPE_SZ:
6613         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6614         break;
6615 
6616     default:
6617         ret = get_errno(safe_fcntl(fd, cmd, arg));
6618         break;
6619     }
6620     return ret;
6621 }
6622 
6623 #ifdef USE_UID16
6624 
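/*
 * With USE_UID16 the target ABI uses 16-bit uid_t/gid_t: host IDs above
 * 65535 are reported as the overflow ID 65534 when narrowed, and the
 * 16-bit value -1 (e.g. setresuid()'s "leave unchanged" marker) must stay
 * -1 when widened back to a host ID.
 */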
6625 static inline int high2lowuid(int uid)
6626 {
6627     if (uid > 65535)
6628         return 65534;
6629     else
6630         return uid;
6631 }
6632 
6633 static inline int high2lowgid(int gid)
6634 {
6635     if (gid > 65535)
6636         return 65534;
6637     else
6638         return gid;
6639 }
6640 
6641 static inline int low2highuid(int uid)
6642 {
6643     if ((int16_t)uid == -1)
6644         return -1;
6645     else
6646         return uid;
6647 }
6648 
6649 static inline int low2highgid(int gid)
6650 {
6651     if ((int16_t)gid == -1)
6652         return -1;
6653     else
6654         return gid;
6655 }
6656 static inline int tswapid(int id)
6657 {
6658     return tswap16(id);
6659 }
6660 
6661 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6662 
6663 #else /* !USE_UID16 */
6664 static inline int high2lowuid(int uid)
6665 {
6666     return uid;
6667 }
6668 static inline int high2lowgid(int gid)
6669 {
6670     return gid;
6671 }
6672 static inline int low2highuid(int uid)
6673 {
6674     return uid;
6675 }
6676 static inline int low2highgid(int gid)
6677 {
6678     return gid;
6679 }
6680 static inline int tswapid(int id)
6681 {
6682     return tswap32(id);
6683 }
6684 
6685 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6686 
6687 #endif /* USE_UID16 */
6688 
6689 /* We must do direct syscalls for setting UID/GID, because we want to
6690  * implement the Linux system call semantics of "change only for this thread",
6691  * not the libc/POSIX semantics of "change for all threads in process".
6692  * (See http://ewontfix.com/17/ for more details.)
6693  * We use the 32-bit version of the syscalls if present; if it is not
6694  * then either the host architecture supports 32-bit UIDs natively with
6695  * the standard syscall, or the 16-bit UID is the best we can do.
6696  */
6697 #ifdef __NR_setuid32
6698 #define __NR_sys_setuid __NR_setuid32
6699 #else
6700 #define __NR_sys_setuid __NR_setuid
6701 #endif
6702 #ifdef __NR_setgid32
6703 #define __NR_sys_setgid __NR_setgid32
6704 #else
6705 #define __NR_sys_setgid __NR_setgid
6706 #endif
6707 #ifdef __NR_setresuid32
6708 #define __NR_sys_setresuid __NR_setresuid32
6709 #else
6710 #define __NR_sys_setresuid __NR_setresuid
6711 #endif
6712 #ifdef __NR_setresgid32
6713 #define __NR_sys_setresgid __NR_setresgid32
6714 #else
6715 #define __NR_sys_setresgid __NR_setresgid
6716 #endif
6717 
6718 _syscall1(int, sys_setuid, uid_t, uid)
6719 _syscall1(int, sys_setgid, gid_t, gid)
6720 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6721 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6722 
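/*
 * syscall_init(): register the struct layouts used by the ioctl thunking
 * code, build the errno reverse-mapping table, and patch target ioctl
 * numbers whose size field was left as "all ones" so that the size
 * reflects the actual argument structure.
 */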
6723 void syscall_init(void)
6724 {
6725     IOCTLEntry *ie;
6726     const argtype *arg_type;
6727     int size;
6728     int i;
6729 
6730     thunk_init(STRUCT_MAX);
6731 
6732 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6733 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6734 #include "syscall_types.h"
6735 #undef STRUCT
6736 #undef STRUCT_SPECIAL
6737 
6738     /* Build the target_to_host_errno_table[] from
6739      * host_to_target_errno_table[]. */
6740     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6741         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6742     }
6743 
6744     /* Patch the ioctl size if necessary. We rely on the fact that
6745        no ioctl has all bits set to '1' in the size field. */
6746     ie = ioctl_entries;
6747     while (ie->target_cmd != 0) {
6748         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6749             TARGET_IOC_SIZEMASK) {
6750             arg_type = ie->arg_type;
6751             if (arg_type[0] != TYPE_PTR) {
6752                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6753                         ie->target_cmd);
6754                 exit(1);
6755             }
6756             arg_type++;
6757             size = thunk_type_size(arg_type, 0);
6758             ie->target_cmd = (ie->target_cmd &
6759                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6760                 (size << TARGET_IOC_SIZESHIFT);
6761         }
6762 
6763         /* automatic consistency check if same arch */
6764 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6765     (defined(__x86_64__) && defined(TARGET_X86_64))
6766         if (unlikely(ie->target_cmd != ie->host_cmd)) {
6767             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6768                     ie->name, ie->target_cmd, ie->host_cmd);
6769         }
6770 #endif
6771         ie++;
6772     }
6773 }
6774 
6775 #ifdef TARGET_NR_truncate64
6776 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6777                                          abi_long arg2,
6778                                          abi_long arg3,
6779                                          abi_long arg4)
6780 {
6781     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6782         arg2 = arg3;
6783         arg3 = arg4;
6784     }
6785     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6786 }
6787 #endif
6788 
6789 #ifdef TARGET_NR_ftruncate64
6790 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6791                                           abi_long arg2,
6792                                           abi_long arg3,
6793                                           abi_long arg4)
6794 {
6795     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6796         arg2 = arg3;
6797         arg3 = arg4;
6798     }
6799     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6800 }
6801 #endif
6802 
6803 #if defined(TARGET_NR_timer_settime) || \
6804     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6805 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6806                                                  abi_ulong target_addr)
6807 {
6808     if (target_to_host_timespec(&host_its->it_interval, target_addr +
6809                                 offsetof(struct target_itimerspec,
6810                                          it_interval)) ||
6811         target_to_host_timespec(&host_its->it_value, target_addr +
6812                                 offsetof(struct target_itimerspec,
6813                                          it_value))) {
6814         return -TARGET_EFAULT;
6815     }
6816 
6817     return 0;
6818 }
6819 #endif
6820 
6821 #if defined(TARGET_NR_timer_settime64) || \
6822     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6823 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6824                                                    abi_ulong target_addr)
6825 {
6826     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6827                                   offsetof(struct target__kernel_itimerspec,
6828                                            it_interval)) ||
6829         target_to_host_timespec64(&host_its->it_value, target_addr +
6830                                   offsetof(struct target__kernel_itimerspec,
6831                                            it_value))) {
6832         return -TARGET_EFAULT;
6833     }
6834 
6835     return 0;
6836 }
6837 #endif
6838 
6839 #if ((defined(TARGET_NR_timerfd_gettime) || \
6840       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6841       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6842 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6843                                                  struct itimerspec *host_its)
6844 {
6845     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6846                                                        it_interval),
6847                                 &host_its->it_interval) ||
6848         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6849                                                        it_value),
6850                                 &host_its->it_value)) {
6851         return -TARGET_EFAULT;
6852     }
6853     return 0;
6854 }
6855 #endif
6856 
6857 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6858       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6859       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6860 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6861                                                    struct itimerspec *host_its)
6862 {
6863     if (host_to_target_timespec64(target_addr +
6864                                   offsetof(struct target__kernel_itimerspec,
6865                                            it_interval),
6866                                   &host_its->it_interval) ||
6867         host_to_target_timespec64(target_addr +
6868                                   offsetof(struct target__kernel_itimerspec,
6869                                            it_value),
6870                                   &host_its->it_value)) {
6871         return -TARGET_EFAULT;
6872     }
6873     return 0;
6874 }
6875 #endif
6876 
6877 #if defined(TARGET_NR_adjtimex) || \
6878     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6879 static inline abi_long target_to_host_timex(struct timex *host_tx,
6880                                             abi_long target_addr)
6881 {
6882     struct target_timex *target_tx;
6883 
6884     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6885         return -TARGET_EFAULT;
6886     }
6887 
6888     __get_user(host_tx->modes, &target_tx->modes);
6889     __get_user(host_tx->offset, &target_tx->offset);
6890     __get_user(host_tx->freq, &target_tx->freq);
6891     __get_user(host_tx->maxerror, &target_tx->maxerror);
6892     __get_user(host_tx->esterror, &target_tx->esterror);
6893     __get_user(host_tx->status, &target_tx->status);
6894     __get_user(host_tx->constant, &target_tx->constant);
6895     __get_user(host_tx->precision, &target_tx->precision);
6896     __get_user(host_tx->tolerance, &target_tx->tolerance);
6897     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6898     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6899     __get_user(host_tx->tick, &target_tx->tick);
6900     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6901     __get_user(host_tx->jitter, &target_tx->jitter);
6902     __get_user(host_tx->shift, &target_tx->shift);
6903     __get_user(host_tx->stabil, &target_tx->stabil);
6904     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6905     __get_user(host_tx->calcnt, &target_tx->calcnt);
6906     __get_user(host_tx->errcnt, &target_tx->errcnt);
6907     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6908     __get_user(host_tx->tai, &target_tx->tai);
6909 
6910     unlock_user_struct(target_tx, target_addr, 0);
6911     return 0;
6912 }
6913 
6914 static inline abi_long host_to_target_timex(abi_long target_addr,
6915                                             struct timex *host_tx)
6916 {
6917     struct target_timex *target_tx;
6918 
6919     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6920         return -TARGET_EFAULT;
6921     }
6922 
6923     __put_user(host_tx->modes, &target_tx->modes);
6924     __put_user(host_tx->offset, &target_tx->offset);
6925     __put_user(host_tx->freq, &target_tx->freq);
6926     __put_user(host_tx->maxerror, &target_tx->maxerror);
6927     __put_user(host_tx->esterror, &target_tx->esterror);
6928     __put_user(host_tx->status, &target_tx->status);
6929     __put_user(host_tx->constant, &target_tx->constant);
6930     __put_user(host_tx->precision, &target_tx->precision);
6931     __put_user(host_tx->tolerance, &target_tx->tolerance);
6932     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6933     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6934     __put_user(host_tx->tick, &target_tx->tick);
6935     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6936     __put_user(host_tx->jitter, &target_tx->jitter);
6937     __put_user(host_tx->shift, &target_tx->shift);
6938     __put_user(host_tx->stabil, &target_tx->stabil);
6939     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6940     __put_user(host_tx->calcnt, &target_tx->calcnt);
6941     __put_user(host_tx->errcnt, &target_tx->errcnt);
6942     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6943     __put_user(host_tx->tai, &target_tx->tai);
6944 
6945     unlock_user_struct(target_tx, target_addr, 1);
6946     return 0;
6947 }
6948 #endif
6949 
6950 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6951                                                abi_ulong target_addr)
6952 {
6953     struct target_sigevent *target_sevp;
6954 
6955     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6956         return -TARGET_EFAULT;
6957     }
6958 
6959     /* This union is awkward on 64 bit systems because it has a 32 bit
6960      * integer and a pointer in it; we follow the conversion approach
6961      * used for handling sigval types in signal.c so the guest should get
6962      * the correct value back even if we did a 64 bit byteswap and it's
6963      * using the 32 bit integer.
6964      */
6965     host_sevp->sigev_value.sival_ptr =
6966         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6967     host_sevp->sigev_signo =
6968         target_to_host_signal(tswap32(target_sevp->sigev_signo));
6969     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6970     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6971 
6972     unlock_user_struct(target_sevp, target_addr, 1);
6973     return 0;
6974 }
6975 
6976 #if defined(TARGET_NR_mlockall)
6977 static inline int target_to_host_mlockall_arg(int arg)
6978 {
6979     int result = 0;
6980 
6981     if (arg & TARGET_MCL_CURRENT) {
6982         result |= MCL_CURRENT;
6983     }
6984     if (arg & TARGET_MCL_FUTURE) {
6985         result |= MCL_FUTURE;
6986     }
6987 #ifdef MCL_ONFAULT
6988     if (arg & TARGET_MCL_ONFAULT) {
6989         result |= MCL_ONFAULT;
6990     }
6991 #endif
6992 
6993     return result;
6994 }
6995 #endif
6996 
6997 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6998      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6999      defined(TARGET_NR_newfstatat))
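/*
 * Convert a host struct stat into whichever 64-bit stat layout the guest
 * expects: the ARM EABI variant, target_stat64, or plain target_stat.
 */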
7000 static inline abi_long host_to_target_stat64(void *cpu_env,
7001                                              abi_ulong target_addr,
7002                                              struct stat *host_st)
7003 {
7004 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7005     if (((CPUARMState *)cpu_env)->eabi) {
7006         struct target_eabi_stat64 *target_st;
7007 
7008         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7009             return -TARGET_EFAULT;
7010         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7011         __put_user(host_st->st_dev, &target_st->st_dev);
7012         __put_user(host_st->st_ino, &target_st->st_ino);
7013 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7014         __put_user(host_st->st_ino, &target_st->__st_ino);
7015 #endif
7016         __put_user(host_st->st_mode, &target_st->st_mode);
7017         __put_user(host_st->st_nlink, &target_st->st_nlink);
7018         __put_user(host_st->st_uid, &target_st->st_uid);
7019         __put_user(host_st->st_gid, &target_st->st_gid);
7020         __put_user(host_st->st_rdev, &target_st->st_rdev);
7021         __put_user(host_st->st_size, &target_st->st_size);
7022         __put_user(host_st->st_blksize, &target_st->st_blksize);
7023         __put_user(host_st->st_blocks, &target_st->st_blocks);
7024         __put_user(host_st->st_atime, &target_st->target_st_atime);
7025         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7026         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7027 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7028         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7029         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7030         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7031 #endif
7032         unlock_user_struct(target_st, target_addr, 1);
7033     } else
7034 #endif
7035     {
7036 #if defined(TARGET_HAS_STRUCT_STAT64)
7037         struct target_stat64 *target_st;
7038 #else
7039         struct target_stat *target_st;
7040 #endif
7041 
7042         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7043             return -TARGET_EFAULT;
7044         memset(target_st, 0, sizeof(*target_st));
7045         __put_user(host_st->st_dev, &target_st->st_dev);
7046         __put_user(host_st->st_ino, &target_st->st_ino);
7047 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7048         __put_user(host_st->st_ino, &target_st->__st_ino);
7049 #endif
7050         __put_user(host_st->st_mode, &target_st->st_mode);
7051         __put_user(host_st->st_nlink, &target_st->st_nlink);
7052         __put_user(host_st->st_uid, &target_st->st_uid);
7053         __put_user(host_st->st_gid, &target_st->st_gid);
7054         __put_user(host_st->st_rdev, &target_st->st_rdev);
7055         /* XXX: better use of kernel struct */
7056         __put_user(host_st->st_size, &target_st->st_size);
7057         __put_user(host_st->st_blksize, &target_st->st_blksize);
7058         __put_user(host_st->st_blocks, &target_st->st_blocks);
7059         __put_user(host_st->st_atime, &target_st->target_st_atime);
7060         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7061         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7062 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7063         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7064         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7065         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7066 #endif
7067         unlock_user_struct(target_st, target_addr, 1);
7068     }
7069 
7070     return 0;
7071 }
7072 #endif
7073 
7074 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7075 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7076                                             abi_ulong target_addr)
7077 {
7078     struct target_statx *target_stx;
7079 
7080     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7081         return -TARGET_EFAULT;
7082     }
7083     memset(target_stx, 0, sizeof(*target_stx));
7084 
7085     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7086     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7087     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7088     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7089     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7090     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7091     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7092     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7093     __put_user(host_stx->stx_size, &target_stx->stx_size);
7094     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7095     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7096     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7097     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7098     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7099     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7100     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7101     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7102     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7103     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7104     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7105     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7106     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7107     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7108 
7109     unlock_user_struct(target_stx, target_addr, 1);
7110 
7111     return 0;
7112 }
7113 #endif
7114 
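/*
 * Pick the host futex syscall: 64-bit hosts always have a 64-bit time_t and
 * only provide __NR_futex; 32-bit hosts use __NR_futex_time64 when their
 * struct timespec carries a 64-bit tv_sec, and fall back to __NR_futex
 * otherwise.
 */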
7115 static int do_sys_futex(int *uaddr, int op, int val,
7116                          const struct timespec *timeout, int *uaddr2,
7117                          int val3)
7118 {
7119 #if HOST_LONG_BITS == 64
7120 #if defined(__NR_futex)
7121     /* The host time_t is always 64-bit, so no _time64 variant is defined. */
7122     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7123 
7124 #endif
7125 #else /* HOST_LONG_BITS == 64 */
7126 #if defined(__NR_futex_time64)
7127     if (sizeof(timeout->tv_sec) == 8) {
7128         /* _time64 function on 32bit arch */
7129         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7130     }
7131 #endif
7132 #if defined(__NR_futex)
7133     /* old function on 32bit arch */
7134     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7135 #endif
7136 #endif /* HOST_LONG_BITS == 64 */
7137     g_assert_not_reached();
7138 }
7139 
7140 static int do_safe_futex(int *uaddr, int op, int val,
7141                          const struct timespec *timeout, int *uaddr2,
7142                          int val3)
7143 {
7144 #if HOST_LONG_BITS == 64
7145 #if defined(__NR_futex)
7146     /* The host time_t is always 64-bit, so no _time64 variant is defined. */
7147     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7148 #endif
7149 #else /* HOST_LONG_BITS == 64 */
7150 #if defined(__NR_futex_time64)
7151     if (sizeof(timeout->tv_sec) == 8) {
7152         /* _time64 function on 32bit arch */
7153         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7154                                            val3));
7155     }
7156 #endif
7157 #if defined(__NR_futex)
7158     /* old function on 32bit arch */
7159     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7160 #endif
7161 #endif /* HOST_LONG_BITS == 64 */
7162     return -TARGET_ENOSYS;
7163 }
7164 
7165 /* ??? Using host futex calls even when target atomic operations
7166    are not really atomic probably breaks things.  However, implementing
7167    futexes locally would make futexes shared between multiple processes
7168    tricky; they would probably be useless anyway, because guest atomic
7169    operations won't work either.  */
7170 #if defined(TARGET_NR_futex)
7171 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7172                     target_ulong uaddr2, int val3)
7173 {
7174     struct timespec ts, *pts;
7175     int base_op;
7176 
7177     /* ??? We assume FUTEX_* constants are the same on both host
7178        and target.  */
7179 #ifdef FUTEX_CMD_MASK
7180     base_op = op & FUTEX_CMD_MASK;
7181 #else
7182     base_op = op;
7183 #endif
7184     switch (base_op) {
7185     case FUTEX_WAIT:
7186     case FUTEX_WAIT_BITSET:
7187         if (timeout) {
7188             pts = &ts;
7189             target_to_host_timespec(pts, timeout);
7190         } else {
7191             pts = NULL;
7192         }
7193         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7194     case FUTEX_WAKE:
7195         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7196     case FUTEX_FD:
7197         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7198     case FUTEX_REQUEUE:
7199     case FUTEX_CMP_REQUEUE:
7200     case FUTEX_WAKE_OP:
7201         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7202            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7203            But the prototype takes a `struct timespec *'; insert casts
7204            to satisfy the compiler.  We do not need to tswap TIMEOUT
7205            since it's not compared to guest memory.  */
7206         pts = (struct timespec *)(uintptr_t) timeout;
7207         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7208                              (base_op == FUTEX_CMP_REQUEUE
7209                                       ? tswap32(val3)
7210                                       : val3));
7211     default:
7212         return -TARGET_ENOSYS;
7213     }
7214 }
7215 #endif
7216 
7217 #if defined(TARGET_NR_futex_time64)
7218 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7219                            target_ulong uaddr2, int val3)
7220 {
7221     struct timespec ts, *pts;
7222     int base_op;
7223 
7224     /* ??? We assume FUTEX_* constants are the same on both host
7225        and target.  */
7226 #ifdef FUTEX_CMD_MASK
7227     base_op = op & FUTEX_CMD_MASK;
7228 #else
7229     base_op = op;
7230 #endif
7231     switch (base_op) {
7232     case FUTEX_WAIT:
7233     case FUTEX_WAIT_BITSET:
7234         if (timeout) {
7235             pts = &ts;
7236             target_to_host_timespec64(pts, timeout);
7237         } else {
7238             pts = NULL;
7239         }
7240         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7241     case FUTEX_WAKE:
7242         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7243     case FUTEX_FD:
7244         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7245     case FUTEX_REQUEUE:
7246     case FUTEX_CMP_REQUEUE:
7247     case FUTEX_WAKE_OP:
7248         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7249            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7250            But the prototype takes a `struct timespec *'; insert casts
7251            to satisfy the compiler.  We do not need to tswap TIMEOUT
7252            since it's not compared to guest memory.  */
7253         pts = (struct timespec *)(uintptr_t) timeout;
7254         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7255                              (base_op == FUTEX_CMP_REQUEUE
7256                                       ? tswap32(val3)
7257                                       : val3));
7258     default:
7259         return -TARGET_ENOSYS;
7260     }
7261 }
7262 #endif
7263 
7264 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
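/*
 * name_to_handle_at(): the guest passes a struct file_handle whose
 * handle_bytes field tells us how much opaque handle data to copy back.
 * Call the host syscall, then copy the opaque handle to the guest and
 * byte-swap the handle_bytes/handle_type header fields.
 */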
7265 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7266                                      abi_long handle, abi_long mount_id,
7267                                      abi_long flags)
7268 {
7269     struct file_handle *target_fh;
7270     struct file_handle *fh;
7271     int mid = 0;
7272     abi_long ret;
7273     char *name;
7274     unsigned int size, total_size;
7275 
7276     if (get_user_s32(size, handle)) {
7277         return -TARGET_EFAULT;
7278     }
7279 
7280     name = lock_user_string(pathname);
7281     if (!name) {
7282         return -TARGET_EFAULT;
7283     }
7284 
7285     total_size = sizeof(struct file_handle) + size;
7286     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7287     if (!target_fh) {
7288         unlock_user(name, pathname, 0);
7289         return -TARGET_EFAULT;
7290     }
7291 
7292     fh = g_malloc0(total_size);
7293     fh->handle_bytes = size;
7294 
7295     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7296     unlock_user(name, pathname, 0);
7297 
7298     /* man name_to_handle_at(2):
7299      * Other than the use of the handle_bytes field, the caller should treat
7300      * the file_handle structure as an opaque data type
7301      */
7302 
7303     memcpy(target_fh, fh, total_size);
7304     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7305     target_fh->handle_type = tswap32(fh->handle_type);
7306     g_free(fh);
7307     unlock_user(target_fh, handle, total_size);
7308 
7309     if (put_user_s32(mid, mount_id)) {
7310         return -TARGET_EFAULT;
7311     }
7312 
7313     return ret;
7314 
7315 }
7316 #endif
7317 
7318 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7319 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7320                                      abi_long flags)
7321 {
7322     struct file_handle *target_fh;
7323     struct file_handle *fh;
7324     unsigned int size, total_size;
7325     abi_long ret;
7326 
7327     if (get_user_s32(size, handle)) {
7328         return -TARGET_EFAULT;
7329     }
7330 
7331     total_size = sizeof(struct file_handle) + size;
7332     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7333     if (!target_fh) {
7334         return -TARGET_EFAULT;
7335     }
7336 
7337     fh = g_memdup(target_fh, total_size);
7338     fh->handle_bytes = size;
7339     fh->handle_type = tswap32(target_fh->handle_type);
7340 
7341     ret = get_errno(open_by_handle_at(mount_fd, fh,
7342                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7343 
7344     g_free(fh);
7345 
7346     unlock_user(target_fh, handle, total_size);
7347 
7348     return ret;
7349 }
7350 #endif
7351 
7352 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7353 
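/*
 * do_signalfd4(): convert the guest signal mask and the
 * O_NONBLOCK/O_CLOEXEC flags to host values, create the signalfd, and
 * register the new fd with fd_trans (target_signalfd_trans) so the data
 * read from it can be translated for the guest.
 */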
7354 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7355 {
7356     int host_flags;
7357     target_sigset_t *target_mask;
7358     sigset_t host_mask;
7359     abi_long ret;
7360 
7361     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7362         return -TARGET_EINVAL;
7363     }
7364     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7365         return -TARGET_EFAULT;
7366     }
7367 
7368     target_to_host_sigset(&host_mask, target_mask);
7369 
7370     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7371 
7372     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7373     if (ret >= 0) {
7374         fd_trans_register(ret, &target_signalfd_trans);
7375     }
7376 
7377     unlock_user_struct(target_mask, mask, 0);
7378 
7379     return ret;
7380 }
7381 #endif
7382 
7383 /* Map host to target signal numbers for the wait family of syscalls.
7384    Assume all other status bits are the same.  */
7385 int host_to_target_waitstatus(int status)
7386 {
7387     if (WIFSIGNALED(status)) {
7388         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7389     }
7390     if (WIFSTOPPED(status)) {
7391         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7392                | (status & 0xff);
7393     }
7394     return status;
7395 }
7396 
7397 static int open_self_cmdline(void *cpu_env, int fd)
7398 {
7399     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7400     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7401     int i;
7402 
7403     for (i = 0; i < bprm->argc; i++) {
7404         size_t len = strlen(bprm->argv[i]) + 1;
7405 
7406         if (write(fd, bprm->argv[i], len) != len) {
7407             return -1;
7408         }
7409     }
7410 
7411     return 0;
7412 }
7413 
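/*
 * Synthesize /proc/self/maps for the guest: walk the host's own mappings,
 * keep only the ranges that are valid guest addresses, translate them with
 * h2g(), and label the guest stack region as "[stack]".
 */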
7414 static int open_self_maps(void *cpu_env, int fd)
7415 {
7416     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7417     TaskState *ts = cpu->opaque;
7418     GSList *map_info = read_self_maps();
7419     GSList *s;
7420     int count;
7421 
7422     for (s = map_info; s; s = g_slist_next(s)) {
7423         MapInfo *e = (MapInfo *) s->data;
7424 
7425         if (h2g_valid(e->start)) {
7426             unsigned long min = e->start;
7427             unsigned long max = e->end;
7428             int flags = page_get_flags(h2g(min));
7429             const char *path;
7430 
7431             max = h2g_valid(max - 1) ?
7432                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7433 
7434             if (page_check_range(h2g(min), max - min, flags) == -1) {
7435                 continue;
7436             }
7437 
7438             if (h2g(min) == ts->info->stack_limit) {
7439                 path = "[stack]";
7440             } else {
7441                 path = e->path;
7442             }
7443 
7444             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7445                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7446                             h2g(min), h2g(max - 1) + 1,
7447                             e->is_read ? 'r' : '-',
7448                             e->is_write ? 'w' : '-',
7449                             e->is_exec ? 'x' : '-',
7450                             e->is_priv ? 'p' : '-',
7451                             (uint64_t) e->offset, e->dev, e->inode);
7452             if (path) {
7453                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7454             } else {
7455                 dprintf(fd, "\n");
7456             }
7457         }
7458     }
7459 
7460     free_self_maps(map_info);
7461 
7462 #ifdef TARGET_VSYSCALL_PAGE
7463     /*
7464      * We only support execution from the vsyscall page.
7465      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7466      */
7467     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7468                     " --xp 00000000 00:00 0",
7469                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7470     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7471 #endif
7472 
7473     return 0;
7474 }
7475 
7476 static int open_self_stat(void *cpu_env, int fd)
7477 {
7478     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7479     TaskState *ts = cpu->opaque;
7480     g_autoptr(GString) buf = g_string_new(NULL);
7481     int i;
7482 
7483     for (i = 0; i < 44; i++) {
7484         if (i == 0) {
7485             /* pid */
7486             g_string_printf(buf, FMT_pid " ", getpid());
7487         } else if (i == 1) {
7488             /* app name */
7489             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7490             bin = bin ? bin + 1 : ts->bprm->argv[0];
7491             g_string_printf(buf, "(%.15s) ", bin);
7492         } else if (i == 27) {
7493             /* stack bottom */
7494             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7495         } else {
7496             /* every other field is reported as zero */
7497             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7498         }
7499 
7500         if (write(fd, buf->str, buf->len) != buf->len) {
7501             return -1;
7502         }
7503     }
7504 
7505     return 0;
7506 }
7507 
7508 static int open_self_auxv(void *cpu_env, int fd)
7509 {
7510     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7511     TaskState *ts = cpu->opaque;
7512     abi_ulong auxv = ts->info->saved_auxv;
7513     abi_ulong len = ts->info->auxv_len;
7514     char *ptr;
7515 
7516     /*
7517      * The auxiliary vector is stored on the target process stack.
7518      * Read in the whole auxv vector and copy it to the file.
7519      */
7520     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7521     if (ptr != NULL) {
7522         while (len > 0) {
7523             ssize_t r;
7524             r = write(fd, ptr, len);
7525             if (r <= 0) {
7526                 break;
7527             }
7528             len -= r;
7529             ptr += r;
7530         }
7531         lseek(fd, 0, SEEK_SET);
7532         unlock_user(ptr, auxv, len);
7533     }
7534 
7535     return 0;
7536 }
7537 
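/*
 * Return 1 if filename names the given entry under /proc/self/ or under
 * /proc/<pid>/ where <pid> is our own process, 0 otherwise.
 */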
7538 static int is_proc_myself(const char *filename, const char *entry)
7539 {
7540     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7541         filename += strlen("/proc/");
7542         if (!strncmp(filename, "self/", strlen("self/"))) {
7543             filename += strlen("self/");
7544         } else if (*filename >= '1' && *filename <= '9') {
7545             char myself[80];
7546             snprintf(myself, sizeof(myself), "%d/", getpid());
7547             if (!strncmp(filename, myself, strlen(myself))) {
7548                 filename += strlen(myself);
7549             } else {
7550                 return 0;
7551             }
7552         } else {
7553             return 0;
7554         }
7555         if (!strcmp(filename, entry)) {
7556             return 1;
7557         }
7558     }
7559     return 0;
7560 }
7561 
7562 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7563     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7564 static int is_proc(const char *filename, const char *entry)
7565 {
7566     return strcmp(filename, entry) == 0;
7567 }
7568 #endif
7569 
7570 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7571 static int open_net_route(void *cpu_env, int fd)
7572 {
7573     FILE *fp;
7574     char *line = NULL;
7575     size_t len = 0;
7576     ssize_t read;
7577 
7578     fp = fopen("/proc/net/route", "r");
7579     if (fp == NULL) {
7580         return -1;
7581     }
7582 
7583     /* read header */
7584 
7585     read = getline(&line, &len, fp);
7586     dprintf(fd, "%s", line);
7587 
7588     /* read routes */
7589 
7590     while ((read = getline(&line, &len, fp)) != -1) {
7591         char iface[16];
7592         uint32_t dest, gw, mask;
7593         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7594         int fields;
7595 
7596         fields = sscanf(line,
7597                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7598                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7599                         &mask, &mtu, &window, &irtt);
7600         if (fields != 11) {
7601             continue;
7602         }
7603         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7604                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7605                 metric, tswap32(mask), mtu, window, irtt);
7606     }
7607 
7608     free(line);
7609     fclose(fp);
7610 
7611     return 0;
7612 }
7613 #endif
7614 
7615 #if defined(TARGET_SPARC)
7616 static int open_cpuinfo(void *cpu_env, int fd)
7617 {
7618     dprintf(fd, "type\t\t: sun4u\n");
7619     return 0;
7620 }
7621 #endif
7622 
7623 #if defined(TARGET_HPPA)
7624 static int open_cpuinfo(void *cpu_env, int fd)
7625 {
7626     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7627     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7628     dprintf(fd, "capabilities\t: os32\n");
7629     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7630     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7631     return 0;
7632 }
7633 #endif
7634 
7635 #if defined(TARGET_M68K)
7636 static int open_hardware(void *cpu_env, int fd)
7637 {
7638     dprintf(fd, "Model:\t\tqemu-m68k\n");
7639     return 0;
7640 }
7641 #endif
7642 
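/*
 * do_openat(): intercept opens of /proc files whose contents must reflect
 * the guest rather than the host.  Entries matched in the fakes[] table are
 * synthesized into an unlinked temporary file and that fd is returned;
 * everything else is passed through to safe_openat().
 */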
7643 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7644 {
7645     struct fake_open {
7646         const char *filename;
7647         int (*fill)(void *cpu_env, int fd);
7648         int (*cmp)(const char *s1, const char *s2);
7649     };
7650     const struct fake_open *fake_open;
7651     static const struct fake_open fakes[] = {
7652         { "maps", open_self_maps, is_proc_myself },
7653         { "stat", open_self_stat, is_proc_myself },
7654         { "auxv", open_self_auxv, is_proc_myself },
7655         { "cmdline", open_self_cmdline, is_proc_myself },
7656 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7657         { "/proc/net/route", open_net_route, is_proc },
7658 #endif
7659 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7660         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7661 #endif
7662 #if defined(TARGET_M68K)
7663         { "/proc/hardware", open_hardware, is_proc },
7664 #endif
7665         { NULL, NULL, NULL }
7666     };
7667 
7668     if (is_proc_myself(pathname, "exe")) {
7669         int execfd = qemu_getauxval(AT_EXECFD);
7670         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7671     }
7672 
7673     for (fake_open = fakes; fake_open->filename; fake_open++) {
7674         if (fake_open->cmp(pathname, fake_open->filename)) {
7675             break;
7676         }
7677     }
7678 
7679     if (fake_open->filename) {
7680         const char *tmpdir;
7681         char filename[PATH_MAX];
7682         int fd, r;
7683 
7684         /* create a temporary file to hold the fake entry's contents */
7685         tmpdir = getenv("TMPDIR");
7686         if (!tmpdir)
7687             tmpdir = "/tmp";
7688         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7689         fd = mkstemp(filename);
7690         if (fd < 0) {
7691             return fd;
7692         }
7693         unlink(filename);
7694 
7695         if ((r = fake_open->fill(cpu_env, fd))) {
7696             int e = errno;
7697             close(fd);
7698             errno = e;
7699             return r;
7700         }
7701         lseek(fd, 0, SEEK_SET);
7702 
7703         return fd;
7704     }
7705 
7706     return safe_openat(dirfd, path(pathname), flags, mode);
7707 }
7708 
7709 #define TIMER_MAGIC 0x0caf0000
7710 #define TIMER_MAGIC_MASK 0xffff0000
7711 
7712 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
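/*
 * Illustrative example (not from the original source): a guest-visible
 * timer ID of 0x0caf0002 passes the TIMER_MAGIC check and decodes to host
 * table index 2, assuming that index is within g_posix_timers[].
 */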
7713 static target_timer_t get_timer_id(abi_long arg)
7714 {
7715     target_timer_t timerid = arg;
7716 
7717     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7718         return -TARGET_EINVAL;
7719     }
7720 
7721     timerid &= 0xffff;
7722 
7723     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7724         return -TARGET_EINVAL;
7725     }
7726 
7727     return timerid;
7728 }
7729 
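/*
 * Copy a CPU affinity mask from guest memory into a host array of
 * unsigned longs, translating bit by bit between the target's abi_ulong
 * word size and the host's word size.
 */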
7730 static int target_to_host_cpu_mask(unsigned long *host_mask,
7731                                    size_t host_size,
7732                                    abi_ulong target_addr,
7733                                    size_t target_size)
7734 {
7735     unsigned target_bits = sizeof(abi_ulong) * 8;
7736     unsigned host_bits = sizeof(*host_mask) * 8;
7737     abi_ulong *target_mask;
7738     unsigned i, j;
7739 
7740     assert(host_size >= target_size);
7741 
7742     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7743     if (!target_mask) {
7744         return -TARGET_EFAULT;
7745     }
7746     memset(host_mask, 0, host_size);
7747 
7748     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7749         unsigned bit = i * target_bits;
7750         abi_ulong val;
7751 
7752         __get_user(val, &target_mask[i]);
7753         for (j = 0; j < target_bits; j++, bit++) {
7754             if (val & (1UL << j)) {
7755                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7756             }
7757         }
7758     }
7759 
7760     unlock_user(target_mask, target_addr, 0);
7761     return 0;
7762 }
7763 
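/*
 * The inverse of target_to_host_cpu_mask(): copy a host CPU mask back out
 * to guest memory, again translating between word sizes bit by bit.
 */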
7764 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7765                                    size_t host_size,
7766                                    abi_ulong target_addr,
7767                                    size_t target_size)
7768 {
7769     unsigned target_bits = sizeof(abi_ulong) * 8;
7770     unsigned host_bits = sizeof(*host_mask) * 8;
7771     abi_ulong *target_mask;
7772     unsigned i, j;
7773 
7774     assert(host_size >= target_size);
7775 
7776     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7777     if (!target_mask) {
7778         return -TARGET_EFAULT;
7779     }
7780 
7781     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7782         unsigned bit = i * target_bits;
7783         abi_ulong val = 0;
7784 
7785         for (j = 0; j < target_bits; j++, bit++) {
7786             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7787                 val |= 1UL << j;
7788             }
7789         }
7790         __put_user(val, &target_mask[i]);
7791     }
7792 
7793     unlock_user(target_mask, target_addr, target_size);
7794     return 0;
7795 }
7796 
7797 /* This is an internal helper for do_syscall so that there is a single
7798  * return point, which makes it easier to perform actions such as
7799  * logging of syscall results.
7800  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7801  */
7802 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7803                             abi_long arg2, abi_long arg3, abi_long arg4,
7804                             abi_long arg5, abi_long arg6, abi_long arg7,
7805                             abi_long arg8)
7806 {
7807     CPUState *cpu = env_cpu(cpu_env);
7808     abi_long ret;
7809 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7810     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7811     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7812     || defined(TARGET_NR_statx)
7813     struct stat st;
7814 #endif
7815 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7816     || defined(TARGET_NR_fstatfs)
7817     struct statfs stfs;
7818 #endif
7819     void *p;
7820 
7821     switch(num) {
7822     case TARGET_NR_exit:
7823         /* In old applications this may be used to implement _exit(2).
7824            However in threaded applications it is used for thread termination,
7825            and _exit_group is used for application termination.
7826            Do thread termination if we have more than one thread.  */
7827 
7828         if (block_signals()) {
7829             return -TARGET_ERESTARTSYS;
7830         }
7831 
7832         pthread_mutex_lock(&clone_lock);
7833 
7834         if (CPU_NEXT(first_cpu)) {
7835             TaskState *ts = cpu->opaque;
7836 
7837             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7838             object_unref(OBJECT(cpu));
7839             /*
7840              * At this point the CPU should be unrealized and removed
7841              * from cpu lists. We can clean up the rest of the thread
7842              * data without the lock held.
7843              */
7844 
7845             pthread_mutex_unlock(&clone_lock);
7846 
7847             if (ts->child_tidptr) {
7848                 put_user_u32(0, ts->child_tidptr);
7849                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7850                           NULL, NULL, 0);
7851             }
7852             thread_cpu = NULL;
7853             g_free(ts);
7854             rcu_unregister_thread();
7855             pthread_exit(NULL);
7856         }
7857 
7858         pthread_mutex_unlock(&clone_lock);
7859         preexit_cleanup(cpu_env, arg1);
7860         _exit(arg1);
7861         return 0; /* avoid warning */
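    /*
     * For read and write, file descriptors with a registered fd_trans
     * translator have the transferred data converted between host and
     * target representations.
     */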
7862     case TARGET_NR_read:
7863         if (arg2 == 0 && arg3 == 0) {
7864             return get_errno(safe_read(arg1, 0, 0));
7865         } else {
7866             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7867                 return -TARGET_EFAULT;
7868             ret = get_errno(safe_read(arg1, p, arg3));
7869             if (ret >= 0 &&
7870                 fd_trans_host_to_target_data(arg1)) {
7871                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7872             }
7873             unlock_user(p, arg2, ret);
7874         }
7875         return ret;
7876     case TARGET_NR_write:
7877         if (arg2 == 0 && arg3 == 0) {
7878             return get_errno(safe_write(arg1, 0, 0));
7879         }
7880         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7881             return -TARGET_EFAULT;
7882         if (fd_trans_target_to_host_data(arg1)) {
7883             void *copy = g_malloc(arg3);
7884             memcpy(copy, p, arg3);
7885             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7886             if (ret >= 0) {
7887                 ret = get_errno(safe_write(arg1, copy, ret));
7888             }
7889             g_free(copy);
7890         } else {
7891             ret = get_errno(safe_write(arg1, p, arg3));
7892         }
7893         unlock_user(p, arg2, 0);
7894         return ret;
7895 
7896 #ifdef TARGET_NR_open
7897     case TARGET_NR_open:
7898         if (!(p = lock_user_string(arg1)))
7899             return -TARGET_EFAULT;
7900         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7901                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
7902                                   arg3));
7903         fd_trans_unregister(ret);
7904         unlock_user(p, arg1, 0);
7905         return ret;
7906 #endif
7907     case TARGET_NR_openat:
7908         if (!(p = lock_user_string(arg2)))
7909             return -TARGET_EFAULT;
7910         ret = get_errno(do_openat(cpu_env, arg1, p,
7911                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
7912                                   arg4));
7913         fd_trans_unregister(ret);
7914         unlock_user(p, arg2, 0);
7915         return ret;
7916 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7917     case TARGET_NR_name_to_handle_at:
7918         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7919         return ret;
7920 #endif
7921 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7922     case TARGET_NR_open_by_handle_at:
7923         ret = do_open_by_handle_at(arg1, arg2, arg3);
7924         fd_trans_unregister(ret);
7925         return ret;
7926 #endif
7927     case TARGET_NR_close:
7928         fd_trans_unregister(arg1);
7929         return get_errno(close(arg1));
7930 
7931     case TARGET_NR_brk:
7932         return do_brk(arg1);
7933 #ifdef TARGET_NR_fork
7934     case TARGET_NR_fork:
7935         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7936 #endif
7937 #ifdef TARGET_NR_waitpid
7938     case TARGET_NR_waitpid:
7939         {
7940             int status;
7941             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7942             if (!is_error(ret) && arg2 && ret
7943                 && put_user_s32(host_to_target_waitstatus(status), arg2))
7944                 return -TARGET_EFAULT;
7945         }
7946         return ret;
7947 #endif
7948 #ifdef TARGET_NR_waitid
7949     case TARGET_NR_waitid:
7950         {
7951             siginfo_t info;
7952             info.si_pid = 0;
7953             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7954             if (!is_error(ret) && arg3 && info.si_pid != 0) {
7955                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7956                     return -TARGET_EFAULT;
7957                 host_to_target_siginfo(p, &info);
7958                 unlock_user(p, arg3, sizeof(target_siginfo_t));
7959             }
7960         }
7961         return ret;
7962 #endif
7963 #ifdef TARGET_NR_creat /* not on alpha */
7964     case TARGET_NR_creat:
7965         if (!(p = lock_user_string(arg1)))
7966             return -TARGET_EFAULT;
7967         ret = get_errno(creat(p, arg2));
7968         fd_trans_unregister(ret);
7969         unlock_user(p, arg1, 0);
7970         return ret;
7971 #endif
7972 #ifdef TARGET_NR_link
7973     case TARGET_NR_link:
7974         {
7975             void * p2;
7976             p = lock_user_string(arg1);
7977             p2 = lock_user_string(arg2);
7978             if (!p || !p2)
7979                 ret = -TARGET_EFAULT;
7980             else
7981                 ret = get_errno(link(p, p2));
7982             unlock_user(p2, arg2, 0);
7983             unlock_user(p, arg1, 0);
7984         }
7985         return ret;
7986 #endif
7987 #if defined(TARGET_NR_linkat)
7988     case TARGET_NR_linkat:
7989         {
7990             void * p2 = NULL;
7991             if (!arg2 || !arg4)
7992                 return -TARGET_EFAULT;
7993             p  = lock_user_string(arg2);
7994             p2 = lock_user_string(arg4);
7995             if (!p || !p2)
7996                 ret = -TARGET_EFAULT;
7997             else
7998                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7999             unlock_user(p, arg2, 0);
8000             unlock_user(p2, arg4, 0);
8001         }
8002         return ret;
8003 #endif
8004 #ifdef TARGET_NR_unlink
8005     case TARGET_NR_unlink:
8006         if (!(p = lock_user_string(arg1)))
8007             return -TARGET_EFAULT;
8008         ret = get_errno(unlink(p));
8009         unlock_user(p, arg1, 0);
8010         return ret;
8011 #endif
8012 #if defined(TARGET_NR_unlinkat)
8013     case TARGET_NR_unlinkat:
8014         if (!(p = lock_user_string(arg2)))
8015             return -TARGET_EFAULT;
8016         ret = get_errno(unlinkat(arg1, p, arg3));
8017         unlock_user(p, arg2, 0);
8018         return ret;
8019 #endif
8020     case TARGET_NR_execve:
8021         {
8022             char **argp, **envp;
8023             int argc, envc;
8024             abi_ulong gp;
8025             abi_ulong guest_argp;
8026             abi_ulong guest_envp;
8027             abi_ulong addr;
8028             char **q;
8029             int total_size = 0;
8030 
8031             argc = 0;
8032             guest_argp = arg2;
8033             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8034                 if (get_user_ual(addr, gp))
8035                     return -TARGET_EFAULT;
8036                 if (!addr)
8037                     break;
8038                 argc++;
8039             }
8040             envc = 0;
8041             guest_envp = arg3;
8042             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8043                 if (get_user_ual(addr, gp))
8044                     return -TARGET_EFAULT;
8045                 if (!addr)
8046                     break;
8047                 envc++;
8048             }
8049 
8050             argp = g_new0(char *, argc + 1);
8051             envp = g_new0(char *, envc + 1);
8052 
8053             for (gp = guest_argp, q = argp; gp;
8054                   gp += sizeof(abi_ulong), q++) {
8055                 if (get_user_ual(addr, gp))
8056                     goto execve_efault;
8057                 if (!addr)
8058                     break;
8059                 if (!(*q = lock_user_string(addr)))
8060                     goto execve_efault;
8061                 total_size += strlen(*q) + 1;
8062             }
8063             *q = NULL;
8064 
8065             for (gp = guest_envp, q = envp; gp;
8066                   gp += sizeof(abi_ulong), q++) {
8067                 if (get_user_ual(addr, gp))
8068                     goto execve_efault;
8069                 if (!addr)
8070                     break;
8071                 if (!(*q = lock_user_string(addr)))
8072                     goto execve_efault;
8073                 total_size += strlen(*q) + 1;
8074             }
8075             *q = NULL;
8076 
8077             if (!(p = lock_user_string(arg1)))
8078                 goto execve_efault;
8079             /* Although execve() is not an interruptible syscall, it is
8080              * a special case where we must use the safe_syscall wrapper:
8081              * if we allow a signal to happen before we make the host
8082              * syscall then we will 'lose' it, because at the point of
8083              * execve the process leaves QEMU's control. So we use the
8084              * safe syscall wrapper to ensure that we either take the
8085              * signal as a guest signal, or else it does not happen
8086              * before the execve completes and makes it the other
8087              * program's problem.
8088              */
8089             ret = get_errno(safe_execve(p, argp, envp));
8090             unlock_user(p, arg1, 0);
8091 
8092             goto execve_end;
8093 
8094         execve_efault:
8095             ret = -TARGET_EFAULT;
8096 
8097         execve_end:
8098             for (gp = guest_argp, q = argp; *q;
8099                   gp += sizeof(abi_ulong), q++) {
8100                 if (get_user_ual(addr, gp)
8101                     || !addr)
8102                     break;
8103                 unlock_user(*q, addr, 0);
8104             }
8105             for (gp = guest_envp, q = envp; *q;
8106                   gp += sizeof(abi_ulong), q++) {
8107                 if (get_user_ual(addr, gp)
8108                     || !addr)
8109                     break;
8110                 unlock_user(*q, addr, 0);
8111             }
8112 
8113             g_free(argp);
8114             g_free(envp);
8115         }
8116         return ret;
8117     case TARGET_NR_chdir:
8118         if (!(p = lock_user_string(arg1)))
8119             return -TARGET_EFAULT;
8120         ret = get_errno(chdir(p));
8121         unlock_user(p, arg1, 0);
8122         return ret;
8123 #ifdef TARGET_NR_time
8124     case TARGET_NR_time:
8125         {
8126             time_t host_time;
8127             ret = get_errno(time(&host_time));
8128             if (!is_error(ret)
8129                 && arg1
8130                 && put_user_sal(host_time, arg1))
8131                 return -TARGET_EFAULT;
8132         }
8133         return ret;
8134 #endif
8135 #ifdef TARGET_NR_mknod
8136     case TARGET_NR_mknod:
8137         if (!(p = lock_user_string(arg1)))
8138             return -TARGET_EFAULT;
8139         ret = get_errno(mknod(p, arg2, arg3));
8140         unlock_user(p, arg1, 0);
8141         return ret;
8142 #endif
8143 #if defined(TARGET_NR_mknodat)
8144     case TARGET_NR_mknodat:
8145         if (!(p = lock_user_string(arg2)))
8146             return -TARGET_EFAULT;
8147         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8148         unlock_user(p, arg2, 0);
8149         return ret;
8150 #endif
8151 #ifdef TARGET_NR_chmod
8152     case TARGET_NR_chmod:
8153         if (!(p = lock_user_string(arg1)))
8154             return -TARGET_EFAULT;
8155         ret = get_errno(chmod(p, arg2));
8156         unlock_user(p, arg1, 0);
8157         return ret;
8158 #endif
8159 #ifdef TARGET_NR_lseek
8160     case TARGET_NR_lseek:
8161         return get_errno(lseek(arg1, arg2, arg3));
8162 #endif
8163 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8164     /* Alpha specific */
8165     case TARGET_NR_getxpid:
8166         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8167         return get_errno(getpid());
8168 #endif
8169 #ifdef TARGET_NR_getpid
8170     case TARGET_NR_getpid:
8171         return get_errno(getpid());
8172 #endif
8173     case TARGET_NR_mount:
8174         {
8175             /* need to look at the data field */
8176             void *p2, *p3;
8177 
8178             if (arg1) {
8179                 p = lock_user_string(arg1);
8180                 if (!p) {
8181                     return -TARGET_EFAULT;
8182                 }
8183             } else {
8184                 p = NULL;
8185             }
8186 
8187             p2 = lock_user_string(arg2);
8188             if (!p2) {
8189                 if (arg1) {
8190                     unlock_user(p, arg1, 0);
8191                 }
8192                 return -TARGET_EFAULT;
8193             }
8194 
8195             if (arg3) {
8196                 p3 = lock_user_string(arg3);
8197                 if (!p3) {
8198                     if (arg1) {
8199                         unlock_user(p, arg1, 0);
8200                     }
8201                     unlock_user(p2, arg2, 0);
8202                     return -TARGET_EFAULT;
8203                 }
8204             } else {
8205                 p3 = NULL;
8206             }
8207 
8208             /* FIXME - arg5 should be locked, but it isn't clear how to
8209              * do that since it's not guaranteed to be a NULL-terminated
8210              * string.
8211              */
8212             if (!arg5) {
8213                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8214             } else {
8215                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8216             }
8217             ret = get_errno(ret);
8218 
8219             if (arg1) {
8220                 unlock_user(p, arg1, 0);
8221             }
8222             unlock_user(p2, arg2, 0);
8223             if (arg3) {
8224                 unlock_user(p3, arg3, 0);
8225             }
8226         }
8227         return ret;
8228 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8229 #if defined(TARGET_NR_umount)
8230     case TARGET_NR_umount:
8231 #endif
8232 #if defined(TARGET_NR_oldumount)
8233     case TARGET_NR_oldumount:
8234 #endif
8235         if (!(p = lock_user_string(arg1)))
8236             return -TARGET_EFAULT;
8237         ret = get_errno(umount(p));
8238         unlock_user(p, arg1, 0);
8239         return ret;
8240 #endif
8241 #ifdef TARGET_NR_stime /* not on alpha */
8242     case TARGET_NR_stime:
8243         {
8244             struct timespec ts;
8245             ts.tv_nsec = 0;
8246             if (get_user_sal(ts.tv_sec, arg1)) {
8247                 return -TARGET_EFAULT;
8248             }
8249             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8250         }
8251 #endif
8252 #ifdef TARGET_NR_alarm /* not on alpha */
8253     case TARGET_NR_alarm:
8254         return alarm(arg1);
8255 #endif
8256 #ifdef TARGET_NR_pause /* not on alpha */
8257     case TARGET_NR_pause:
8258         if (!block_signals()) {
8259             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8260         }
8261         return -TARGET_EINTR;
8262 #endif
8263 #ifdef TARGET_NR_utime
8264     case TARGET_NR_utime:
8265         {
8266             struct utimbuf tbuf, *host_tbuf;
8267             struct target_utimbuf *target_tbuf;
8268             if (arg2) {
8269                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8270                     return -TARGET_EFAULT;
8271                 tbuf.actime = tswapal(target_tbuf->actime);
8272                 tbuf.modtime = tswapal(target_tbuf->modtime);
8273                 unlock_user_struct(target_tbuf, arg2, 0);
8274                 host_tbuf = &tbuf;
8275             } else {
8276                 host_tbuf = NULL;
8277             }
8278             if (!(p = lock_user_string(arg1)))
8279                 return -TARGET_EFAULT;
8280             ret = get_errno(utime(p, host_tbuf));
8281             unlock_user(p, arg1, 0);
8282         }
8283         return ret;
8284 #endif
8285 #ifdef TARGET_NR_utimes
8286     case TARGET_NR_utimes:
8287         {
8288             struct timeval *tvp, tv[2];
8289             if (arg2) {
8290                 if (copy_from_user_timeval(&tv[0], arg2)
8291                     || copy_from_user_timeval(&tv[1],
8292                                               arg2 + sizeof(struct target_timeval)))
8293                     return -TARGET_EFAULT;
8294                 tvp = tv;
8295             } else {
8296                 tvp = NULL;
8297             }
8298             if (!(p = lock_user_string(arg1)))
8299                 return -TARGET_EFAULT;
8300             ret = get_errno(utimes(p, tvp));
8301             unlock_user(p, arg1, 0);
8302         }
8303         return ret;
8304 #endif
8305 #if defined(TARGET_NR_futimesat)
8306     case TARGET_NR_futimesat:
8307         {
8308             struct timeval *tvp, tv[2];
8309             if (arg3) {
8310                 if (copy_from_user_timeval(&tv[0], arg3)
8311                     || copy_from_user_timeval(&tv[1],
8312                                               arg3 + sizeof(struct target_timeval)))
8313                     return -TARGET_EFAULT;
8314                 tvp = tv;
8315             } else {
8316                 tvp = NULL;
8317             }
8318             if (!(p = lock_user_string(arg2))) {
8319                 return -TARGET_EFAULT;
8320             }
8321             ret = get_errno(futimesat(arg1, path(p), tvp));
8322             unlock_user(p, arg2, 0);
8323         }
8324         return ret;
8325 #endif
8326 #ifdef TARGET_NR_access
8327     case TARGET_NR_access:
8328         if (!(p = lock_user_string(arg1))) {
8329             return -TARGET_EFAULT;
8330         }
8331         ret = get_errno(access(path(p), arg2));
8332         unlock_user(p, arg1, 0);
8333         return ret;
8334 #endif
8335 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8336     case TARGET_NR_faccessat:
8337         if (!(p = lock_user_string(arg2))) {
8338             return -TARGET_EFAULT;
8339         }
8340         ret = get_errno(faccessat(arg1, p, arg3, 0));
8341         unlock_user(p, arg2, 0);
8342         return ret;
8343 #endif
8344 #ifdef TARGET_NR_nice /* not on alpha */
8345     case TARGET_NR_nice:
8346         return get_errno(nice(arg1));
8347 #endif
8348     case TARGET_NR_sync:
8349         sync();
8350         return 0;
8351 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8352     case TARGET_NR_syncfs:
8353         return get_errno(syncfs(arg1));
8354 #endif
8355     case TARGET_NR_kill:
8356         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8357 #ifdef TARGET_NR_rename
8358     case TARGET_NR_rename:
8359         {
8360             void *p2;
8361             p = lock_user_string(arg1);
8362             p2 = lock_user_string(arg2);
8363             if (!p || !p2)
8364                 ret = -TARGET_EFAULT;
8365             else
8366                 ret = get_errno(rename(p, p2));
8367             unlock_user(p2, arg2, 0);
8368             unlock_user(p, arg1, 0);
8369         }
8370         return ret;
8371 #endif
8372 #if defined(TARGET_NR_renameat)
8373     case TARGET_NR_renameat:
8374         {
8375             void *p2;
8376             p  = lock_user_string(arg2);
8377             p2 = lock_user_string(arg4);
8378             if (!p || !p2)
8379                 ret = -TARGET_EFAULT;
8380             else
8381                 ret = get_errno(renameat(arg1, p, arg3, p2));
8382             unlock_user(p2, arg4, 0);
8383             unlock_user(p, arg2, 0);
8384         }
8385         return ret;
8386 #endif
8387 #if defined(TARGET_NR_renameat2)
8388     case TARGET_NR_renameat2:
8389         {
8390             void *p2;
8391             p  = lock_user_string(arg2);
8392             p2 = lock_user_string(arg4);
8393             if (!p || !p2) {
8394                 ret = -TARGET_EFAULT;
8395             } else {
8396                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8397             }
8398             unlock_user(p2, arg4, 0);
8399             unlock_user(p, arg2, 0);
8400         }
8401         return ret;
8402 #endif
8403 #ifdef TARGET_NR_mkdir
8404     case TARGET_NR_mkdir:
8405         if (!(p = lock_user_string(arg1)))
8406             return -TARGET_EFAULT;
8407         ret = get_errno(mkdir(p, arg2));
8408         unlock_user(p, arg1, 0);
8409         return ret;
8410 #endif
8411 #if defined(TARGET_NR_mkdirat)
8412     case TARGET_NR_mkdirat:
8413         if (!(p = lock_user_string(arg2)))
8414             return -TARGET_EFAULT;
8415         ret = get_errno(mkdirat(arg1, p, arg3));
8416         unlock_user(p, arg2, 0);
8417         return ret;
8418 #endif
8419 #ifdef TARGET_NR_rmdir
8420     case TARGET_NR_rmdir:
8421         if (!(p = lock_user_string(arg1)))
8422             return -TARGET_EFAULT;
8423         ret = get_errno(rmdir(p));
8424         unlock_user(p, arg1, 0);
8425         return ret;
8426 #endif
8427     case TARGET_NR_dup:
8428         ret = get_errno(dup(arg1));
8429         if (ret >= 0) {
8430             fd_trans_dup(arg1, ret);
8431         }
8432         return ret;
8433 #ifdef TARGET_NR_pipe
8434     case TARGET_NR_pipe:
8435         return do_pipe(cpu_env, arg1, 0, 0);
8436 #endif
8437 #ifdef TARGET_NR_pipe2
8438     case TARGET_NR_pipe2:
8439         return do_pipe(cpu_env, arg1,
8440                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8441 #endif
8442     case TARGET_NR_times:
8443         {
8444             struct target_tms *tmsp;
8445             struct tms tms;
8446             ret = get_errno(times(&tms));
8447             if (arg1) {
8448                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8449                 if (!tmsp)
8450                     return -TARGET_EFAULT;
8451                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8452                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8453                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8454                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8455             }
8456             if (!is_error(ret))
8457                 ret = host_to_target_clock_t(ret);
8458         }
8459         return ret;
8460     case TARGET_NR_acct:
8461         if (arg1 == 0) {
8462             ret = get_errno(acct(NULL));
8463         } else {
8464             if (!(p = lock_user_string(arg1))) {
8465                 return -TARGET_EFAULT;
8466             }
8467             ret = get_errno(acct(path(p)));
8468             unlock_user(p, arg1, 0);
8469         }
8470         return ret;
8471 #ifdef TARGET_NR_umount2
8472     case TARGET_NR_umount2:
8473         if (!(p = lock_user_string(arg1)))
8474             return -TARGET_EFAULT;
8475         ret = get_errno(umount2(p, arg2));
8476         unlock_user(p, arg1, 0);
8477         return ret;
8478 #endif
8479     case TARGET_NR_ioctl:
8480         return do_ioctl(arg1, arg2, arg3);
8481 #ifdef TARGET_NR_fcntl
8482     case TARGET_NR_fcntl:
8483         return do_fcntl(arg1, arg2, arg3);
8484 #endif
8485     case TARGET_NR_setpgid:
8486         return get_errno(setpgid(arg1, arg2));
8487     case TARGET_NR_umask:
8488         return get_errno(umask(arg1));
8489     case TARGET_NR_chroot:
8490         if (!(p = lock_user_string(arg1)))
8491             return -TARGET_EFAULT;
8492         ret = get_errno(chroot(p));
8493         unlock_user(p, arg1, 0);
8494         return ret;
8495 #ifdef TARGET_NR_dup2
8496     case TARGET_NR_dup2:
8497         ret = get_errno(dup2(arg1, arg2));
8498         if (ret >= 0) {
8499             fd_trans_dup(arg1, arg2);
8500         }
8501         return ret;
8502 #endif
8503 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8504     case TARGET_NR_dup3:
8505     {
8506         int host_flags;
8507 
8508         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8509             return -TARGET_EINVAL;
8510         }
8511         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8512         ret = get_errno(dup3(arg1, arg2, host_flags));
8513         if (ret >= 0) {
8514             fd_trans_dup(arg1, arg2);
8515         }
8516         return ret;
8517     }
8518 #endif
8519 #ifdef TARGET_NR_getppid /* not on alpha */
8520     case TARGET_NR_getppid:
8521         return get_errno(getppid());
8522 #endif
8523 #ifdef TARGET_NR_getpgrp
8524     case TARGET_NR_getpgrp:
8525         return get_errno(getpgrp());
8526 #endif
8527     case TARGET_NR_setsid:
8528         return get_errno(setsid());
8529 #ifdef TARGET_NR_sigaction
8530     case TARGET_NR_sigaction:
8531         {
8532 #if defined(TARGET_ALPHA)
8533             struct target_sigaction act, oact, *pact = 0;
8534             struct target_old_sigaction *old_act;
8535             if (arg2) {
8536                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8537                     return -TARGET_EFAULT;
8538                 act._sa_handler = old_act->_sa_handler;
8539                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8540                 act.sa_flags = old_act->sa_flags;
8541                 act.sa_restorer = 0;
8542                 unlock_user_struct(old_act, arg2, 0);
8543                 pact = &act;
8544             }
8545             ret = get_errno(do_sigaction(arg1, pact, &oact));
8546             if (!is_error(ret) && arg3) {
8547                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8548                     return -TARGET_EFAULT;
8549                 old_act->_sa_handler = oact._sa_handler;
8550                 old_act->sa_mask = oact.sa_mask.sig[0];
8551                 old_act->sa_flags = oact.sa_flags;
8552                 unlock_user_struct(old_act, arg3, 1);
8553             }
8554 #elif defined(TARGET_MIPS)
8555 	    struct target_sigaction act, oact, *pact, *old_act;
8556 
8557 	    if (arg2) {
8558                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8559                     return -TARGET_EFAULT;
8560 		act._sa_handler = old_act->_sa_handler;
8561 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8562 		act.sa_flags = old_act->sa_flags;
8563 		unlock_user_struct(old_act, arg2, 0);
8564 		pact = &act;
8565 	    } else {
8566 		pact = NULL;
8567 	    }
8568 
8569 	    ret = get_errno(do_sigaction(arg1, pact, &oact));
8570 
8571 	    if (!is_error(ret) && arg3) {
8572                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8573                     return -TARGET_EFAULT;
8574 		old_act->_sa_handler = oact._sa_handler;
8575 		old_act->sa_flags = oact.sa_flags;
8576 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8577 		old_act->sa_mask.sig[1] = 0;
8578 		old_act->sa_mask.sig[2] = 0;
8579 		old_act->sa_mask.sig[3] = 0;
8580 		unlock_user_struct(old_act, arg3, 1);
8581 	    }
8582 #else
8583             struct target_old_sigaction *old_act;
8584             struct target_sigaction act, oact, *pact;
8585             if (arg2) {
8586                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8587                     return -TARGET_EFAULT;
8588                 act._sa_handler = old_act->_sa_handler;
8589                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8590                 act.sa_flags = old_act->sa_flags;
8591                 act.sa_restorer = old_act->sa_restorer;
8592 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8593                 act.ka_restorer = 0;
8594 #endif
8595                 unlock_user_struct(old_act, arg2, 0);
8596                 pact = &act;
8597             } else {
8598                 pact = NULL;
8599             }
8600             ret = get_errno(do_sigaction(arg1, pact, &oact));
8601             if (!is_error(ret) && arg3) {
8602                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8603                     return -TARGET_EFAULT;
8604                 old_act->_sa_handler = oact._sa_handler;
8605                 old_act->sa_mask = oact.sa_mask.sig[0];
8606                 old_act->sa_flags = oact.sa_flags;
8607                 old_act->sa_restorer = oact.sa_restorer;
8608                 unlock_user_struct(old_act, arg3, 1);
8609             }
8610 #endif
8611         }
8612         return ret;
8613 #endif
8614     case TARGET_NR_rt_sigaction:
8615         {
8616 #if defined(TARGET_ALPHA)
8617             /* For Alpha and SPARC this is a 5-argument syscall, with
8618              * a 'restorer' parameter which must be copied into the
8619              * sa_restorer field of the sigaction struct.
8620              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8621              * and arg5 is the sigsetsize.
8622              * Alpha also has a separate rt_sigaction struct that it uses
8623              * here; SPARC uses the usual sigaction struct.
8624              */
8625             struct target_rt_sigaction *rt_act;
8626             struct target_sigaction act, oact, *pact = 0;
8627 
8628             if (arg4 != sizeof(target_sigset_t)) {
8629                 return -TARGET_EINVAL;
8630             }
8631             if (arg2) {
8632                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8633                     return -TARGET_EFAULT;
8634                 act._sa_handler = rt_act->_sa_handler;
8635                 act.sa_mask = rt_act->sa_mask;
8636                 act.sa_flags = rt_act->sa_flags;
8637                 act.sa_restorer = arg5;
8638                 unlock_user_struct(rt_act, arg2, 0);
8639                 pact = &act;
8640             }
8641             ret = get_errno(do_sigaction(arg1, pact, &oact));
8642             if (!is_error(ret) && arg3) {
8643                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8644                     return -TARGET_EFAULT;
8645                 rt_act->_sa_handler = oact._sa_handler;
8646                 rt_act->sa_mask = oact.sa_mask;
8647                 rt_act->sa_flags = oact.sa_flags;
8648                 unlock_user_struct(rt_act, arg3, 1);
8649             }
8650 #else
8651 #ifdef TARGET_SPARC
8652             target_ulong restorer = arg4;
8653             target_ulong sigsetsize = arg5;
8654 #else
8655             target_ulong sigsetsize = arg4;
8656 #endif
8657             struct target_sigaction *act;
8658             struct target_sigaction *oact;
8659 
8660             if (sigsetsize != sizeof(target_sigset_t)) {
8661                 return -TARGET_EINVAL;
8662             }
8663             if (arg2) {
8664                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8665                     return -TARGET_EFAULT;
8666                 }
8667 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8668                 act->ka_restorer = restorer;
8669 #endif
8670             } else {
8671                 act = NULL;
8672             }
8673             if (arg3) {
8674                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8675                     ret = -TARGET_EFAULT;
8676                     goto rt_sigaction_fail;
8677                 }
8678             } else
8679                 oact = NULL;
8680             ret = get_errno(do_sigaction(arg1, act, oact));
8681 	rt_sigaction_fail:
8682             if (act)
8683                 unlock_user_struct(act, arg2, 0);
8684             if (oact)
8685                 unlock_user_struct(oact, arg3, 1);
8686 #endif
8687         }
8688         return ret;
8689 #ifdef TARGET_NR_sgetmask /* not on alpha */
8690     case TARGET_NR_sgetmask:
8691         {
8692             sigset_t cur_set;
8693             abi_ulong target_set;
8694             ret = do_sigprocmask(0, NULL, &cur_set);
8695             if (!ret) {
8696                 host_to_target_old_sigset(&target_set, &cur_set);
8697                 ret = target_set;
8698             }
8699         }
8700         return ret;
8701 #endif
8702 #ifdef TARGET_NR_ssetmask /* not on alpha */
8703     case TARGET_NR_ssetmask:
8704         {
8705             sigset_t set, oset;
8706             abi_ulong target_set = arg1;
8707             target_to_host_old_sigset(&set, &target_set);
8708             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8709             if (!ret) {
8710                 host_to_target_old_sigset(&target_set, &oset);
8711                 ret = target_set;
8712             }
8713         }
8714         return ret;
8715 #endif
8716 #ifdef TARGET_NR_sigprocmask
8717     case TARGET_NR_sigprocmask:
8718         {
8719 #if defined(TARGET_ALPHA)
8720             sigset_t set, oldset;
8721             abi_ulong mask;
8722             int how;
8723 
8724             switch (arg1) {
8725             case TARGET_SIG_BLOCK:
8726                 how = SIG_BLOCK;
8727                 break;
8728             case TARGET_SIG_UNBLOCK:
8729                 how = SIG_UNBLOCK;
8730                 break;
8731             case TARGET_SIG_SETMASK:
8732                 how = SIG_SETMASK;
8733                 break;
8734             default:
8735                 return -TARGET_EINVAL;
8736             }
8737             mask = arg2;
8738             target_to_host_old_sigset(&set, &mask);
8739 
8740             ret = do_sigprocmask(how, &set, &oldset);
8741             if (!is_error(ret)) {
8742                 host_to_target_old_sigset(&mask, &oldset);
8743                 ret = mask;
8744                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8745             }
8746 #else
8747             sigset_t set, oldset, *set_ptr;
8748             int how;
8749 
8750             if (arg2) {
8751                 switch (arg1) {
8752                 case TARGET_SIG_BLOCK:
8753                     how = SIG_BLOCK;
8754                     break;
8755                 case TARGET_SIG_UNBLOCK:
8756                     how = SIG_UNBLOCK;
8757                     break;
8758                 case TARGET_SIG_SETMASK:
8759                     how = SIG_SETMASK;
8760                     break;
8761                 default:
8762                     return -TARGET_EINVAL;
8763                 }
8764                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8765                     return -TARGET_EFAULT;
8766                 target_to_host_old_sigset(&set, p);
8767                 unlock_user(p, arg2, 0);
8768                 set_ptr = &set;
8769             } else {
8770                 how = 0;
8771                 set_ptr = NULL;
8772             }
8773             ret = do_sigprocmask(how, set_ptr, &oldset);
8774             if (!is_error(ret) && arg3) {
8775                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8776                     return -TARGET_EFAULT;
8777                 host_to_target_old_sigset(p, &oldset);
8778                 unlock_user(p, arg3, sizeof(target_sigset_t));
8779             }
8780 #endif
8781         }
8782         return ret;
8783 #endif
8784     case TARGET_NR_rt_sigprocmask:
8785         {
8786             int how = arg1;
8787             sigset_t set, oldset, *set_ptr;
8788 
8789             if (arg4 != sizeof(target_sigset_t)) {
8790                 return -TARGET_EINVAL;
8791             }
8792 
8793             if (arg2) {
8794                 switch(how) {
8795                 case TARGET_SIG_BLOCK:
8796                     how = SIG_BLOCK;
8797                     break;
8798                 case TARGET_SIG_UNBLOCK:
8799                     how = SIG_UNBLOCK;
8800                     break;
8801                 case TARGET_SIG_SETMASK:
8802                     how = SIG_SETMASK;
8803                     break;
8804                 default:
8805                     return -TARGET_EINVAL;
8806                 }
8807                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8808                     return -TARGET_EFAULT;
8809                 target_to_host_sigset(&set, p);
8810                 unlock_user(p, arg2, 0);
8811                 set_ptr = &set;
8812             } else {
8813                 how = 0;
8814                 set_ptr = NULL;
8815             }
8816             ret = do_sigprocmask(how, set_ptr, &oldset);
8817             if (!is_error(ret) && arg3) {
8818                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8819                     return -TARGET_EFAULT;
8820                 host_to_target_sigset(p, &oldset);
8821                 unlock_user(p, arg3, sizeof(target_sigset_t));
8822             }
8823         }
8824         return ret;
8825 #ifdef TARGET_NR_sigpending
8826     case TARGET_NR_sigpending:
8827         {
8828             sigset_t set;
8829             ret = get_errno(sigpending(&set));
8830             if (!is_error(ret)) {
8831                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8832                     return -TARGET_EFAULT;
8833                 host_to_target_old_sigset(p, &set);
8834                 unlock_user(p, arg1, sizeof(target_sigset_t));
8835             }
8836         }
8837         return ret;
8838 #endif
8839     case TARGET_NR_rt_sigpending:
8840         {
8841             sigset_t set;
8842 
8843             /* Yes, this check is >, not != like most. We follow the kernel's
8844              * logic, which does it like this because it implements
8845              * NR_sigpending through the same code path, and in that case
8846              * the old_sigset_t is smaller in size.
8847              */
8848             if (arg2 > sizeof(target_sigset_t)) {
8849                 return -TARGET_EINVAL;
8850             }
8851 
8852             ret = get_errno(sigpending(&set));
8853             if (!is_error(ret)) {
8854                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8855                     return -TARGET_EFAULT;
8856                 host_to_target_sigset(p, &set);
8857                 unlock_user(p, arg1, sizeof(target_sigset_t));
8858             }
8859         }
8860         return ret;
8861 #ifdef TARGET_NR_sigsuspend
8862     case TARGET_NR_sigsuspend:
8863         {
8864             TaskState *ts = cpu->opaque;
8865 #if defined(TARGET_ALPHA)
8866             abi_ulong mask = arg1;
8867             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8868 #else
8869             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8870                 return -TARGET_EFAULT;
8871             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8872             unlock_user(p, arg1, 0);
8873 #endif
8874             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8875                                                SIGSET_T_SIZE));
8876             if (ret != -TARGET_ERESTARTSYS) {
8877                 ts->in_sigsuspend = 1;
8878             }
8879         }
8880         return ret;
8881 #endif
8882     case TARGET_NR_rt_sigsuspend:
8883         {
8884             TaskState *ts = cpu->opaque;
8885 
8886             if (arg2 != sizeof(target_sigset_t)) {
8887                 return -TARGET_EINVAL;
8888             }
8889             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8890                 return -TARGET_EFAULT;
8891             target_to_host_sigset(&ts->sigsuspend_mask, p);
8892             unlock_user(p, arg1, 0);
8893             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8894                                                SIGSET_T_SIZE));
8895             if (ret != -TARGET_ERESTARTSYS) {
8896                 ts->in_sigsuspend = 1;
8897             }
8898         }
8899         return ret;
8900 #ifdef TARGET_NR_rt_sigtimedwait
8901     case TARGET_NR_rt_sigtimedwait:
8902         {
8903             sigset_t set;
8904             struct timespec uts, *puts;
8905             siginfo_t uinfo;
8906 
8907             if (arg4 != sizeof(target_sigset_t)) {
8908                 return -TARGET_EINVAL;
8909             }
8910 
8911             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8912                 return -TARGET_EFAULT;
8913             target_to_host_sigset(&set, p);
8914             unlock_user(p, arg1, 0);
8915             if (arg3) {
8916                 puts = &uts;
8917                 if (target_to_host_timespec(puts, arg3)) {
8918                     return -TARGET_EFAULT;
8919                 }
8920             } else {
8921                 puts = NULL;
8922             }
8923             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8924                                                  SIGSET_T_SIZE));
8925             if (!is_error(ret)) {
8926                 if (arg2) {
8927                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8928                                   0);
8929                     if (!p) {
8930                         return -TARGET_EFAULT;
8931                     }
8932                     host_to_target_siginfo(p, &uinfo);
8933                     unlock_user(p, arg2, sizeof(target_siginfo_t));
8934                 }
8935                 ret = host_to_target_signal(ret);
8936             }
8937         }
8938         return ret;
8939 #endif
8940     case TARGET_NR_rt_sigqueueinfo:
8941         {
8942             siginfo_t uinfo;
8943 
8944             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8945             if (!p) {
8946                 return -TARGET_EFAULT;
8947             }
8948             target_to_host_siginfo(&uinfo, p);
8949             unlock_user(p, arg3, 0);
8950             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8951         }
8952         return ret;
8953     case TARGET_NR_rt_tgsigqueueinfo:
8954         {
8955             siginfo_t uinfo;
8956 
8957             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8958             if (!p) {
8959                 return -TARGET_EFAULT;
8960             }
8961             target_to_host_siginfo(&uinfo, p);
8962             unlock_user(p, arg4, 0);
8963             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8964         }
8965         return ret;
8966 #ifdef TARGET_NR_sigreturn
8967     case TARGET_NR_sigreturn:
8968         if (block_signals()) {
8969             return -TARGET_ERESTARTSYS;
8970         }
8971         return do_sigreturn(cpu_env);
8972 #endif
8973     case TARGET_NR_rt_sigreturn:
8974         if (block_signals()) {
8975             return -TARGET_ERESTARTSYS;
8976         }
8977         return do_rt_sigreturn(cpu_env);
8978     case TARGET_NR_sethostname:
8979         if (!(p = lock_user_string(arg1)))
8980             return -TARGET_EFAULT;
8981         ret = get_errno(sethostname(p, arg2));
8982         unlock_user(p, arg1, 0);
8983         return ret;
8984 #ifdef TARGET_NR_setrlimit
8985     case TARGET_NR_setrlimit:
8986         {
8987             int resource = target_to_host_resource(arg1);
8988             struct target_rlimit *target_rlim;
8989             struct rlimit rlim;
8990             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8991                 return -TARGET_EFAULT;
8992             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8993             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8994             unlock_user_struct(target_rlim, arg2, 0);
8995             /*
8996              * If we just passed through resource limit settings for memory then
8997              * they would also apply to QEMU's own allocations, and QEMU will
8998              * crash or hang or die if its allocations fail. Ideally we would
8999              * track the guest allocations in QEMU and apply the limits ourselves.
9000              * For now, just tell the guest the call succeeded but don't actually
9001              * limit anything.
9002              */
9003             if (resource != RLIMIT_AS &&
9004                 resource != RLIMIT_DATA &&
9005                 resource != RLIMIT_STACK) {
9006                 return get_errno(setrlimit(resource, &rlim));
9007             } else {
9008                 return 0;
9009             }
9010         }
9011 #endif
9012 #ifdef TARGET_NR_getrlimit
9013     case TARGET_NR_getrlimit:
9014         {
9015             int resource = target_to_host_resource(arg1);
9016             struct target_rlimit *target_rlim;
9017             struct rlimit rlim;
9018 
9019             ret = get_errno(getrlimit(resource, &rlim));
9020             if (!is_error(ret)) {
9021                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9022                     return -TARGET_EFAULT;
9023                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9024                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9025                 unlock_user_struct(target_rlim, arg2, 1);
9026             }
9027         }
9028         return ret;
9029 #endif
9030     case TARGET_NR_getrusage:
9031         {
9032             struct rusage rusage;
9033             ret = get_errno(getrusage(arg1, &rusage));
9034             if (!is_error(ret)) {
9035                 ret = host_to_target_rusage(arg2, &rusage);
9036             }
9037         }
9038         return ret;
9039 #if defined(TARGET_NR_gettimeofday)
9040     case TARGET_NR_gettimeofday:
9041         {
9042             struct timeval tv;
9043             struct timezone tz;
9044 
9045             ret = get_errno(gettimeofday(&tv, &tz));
9046             if (!is_error(ret)) {
9047                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9048                     return -TARGET_EFAULT;
9049                 }
9050                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9051                     return -TARGET_EFAULT;
9052                 }
9053             }
9054         }
9055         return ret;
9056 #endif
9057 #if defined(TARGET_NR_settimeofday)
9058     case TARGET_NR_settimeofday:
9059         {
9060             struct timeval tv, *ptv = NULL;
9061             struct timezone tz, *ptz = NULL;
9062 
9063             if (arg1) {
9064                 if (copy_from_user_timeval(&tv, arg1)) {
9065                     return -TARGET_EFAULT;
9066                 }
9067                 ptv = &tv;
9068             }
9069 
9070             if (arg2) {
9071                 if (copy_from_user_timezone(&tz, arg2)) {
9072                     return -TARGET_EFAULT;
9073                 }
9074                 ptz = &tz;
9075             }
9076 
9077             return get_errno(settimeofday(ptv, ptz));
9078         }
9079 #endif
9080 #if defined(TARGET_NR_select)
9081     case TARGET_NR_select:
9082 #if defined(TARGET_WANT_NI_OLD_SELECT)
9083         /* some architectures used to have old_select here
9084          * but now return ENOSYS for it.
9085          */
9086         ret = -TARGET_ENOSYS;
9087 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9088         ret = do_old_select(arg1);
9089 #else
9090         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9091 #endif
9092         return ret;
9093 #endif
9094 #ifdef TARGET_NR_pselect6
9095     case TARGET_NR_pselect6:
9096         {
9097             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9098             fd_set rfds, wfds, efds;
9099             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9100             struct timespec ts, *ts_ptr;
9101 
9102             /*
9103              * The 6th arg is actually two args smashed together,
9104              * so we cannot use the C library.
9105              */
9106             sigset_t set;
9107             struct {
9108                 sigset_t *set;
9109                 size_t size;
9110             } sig, *sig_ptr;
9111 
9112             abi_ulong arg_sigset, arg_sigsize, *arg7;
9113             target_sigset_t *target_sigset;
9114 
9115             n = arg1;
9116             rfd_addr = arg2;
9117             wfd_addr = arg3;
9118             efd_addr = arg4;
9119             ts_addr = arg5;
9120 
9121             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9122             if (ret) {
9123                 return ret;
9124             }
9125             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9126             if (ret) {
9127                 return ret;
9128             }
9129             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9130             if (ret) {
9131                 return ret;
9132             }
9133 
9134             /*
9135              * This takes a timespec, and not a timeval, so we cannot
9136              * use the do_select() helper ...
9137              */
9138             if (ts_addr) {
9139                 if (target_to_host_timespec(&ts, ts_addr)) {
9140                     return -TARGET_EFAULT;
9141                 }
9142                 ts_ptr = &ts;
9143             } else {
9144                 ts_ptr = NULL;
9145             }
9146 
9147             /* Extract the two packed args for the sigset */
9148             if (arg6) {
9149                 sig_ptr = &sig;
9150                 sig.size = SIGSET_T_SIZE;
9151 
9152                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9153                 if (!arg7) {
9154                     return -TARGET_EFAULT;
9155                 }
9156                 arg_sigset = tswapal(arg7[0]);
9157                 arg_sigsize = tswapal(arg7[1]);
9158                 unlock_user(arg7, arg6, 0);
9159 
9160                 if (arg_sigset) {
9161                     sig.set = &set;
9162                     if (arg_sigsize != sizeof(*target_sigset)) {
9163                         /* Like the kernel, we enforce correct size sigsets */
9164                         return -TARGET_EINVAL;
9165                     }
9166                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9167                                               sizeof(*target_sigset), 1);
9168                     if (!target_sigset) {
9169                         return -TARGET_EFAULT;
9170                     }
9171                     target_to_host_sigset(&set, target_sigset);
9172                     unlock_user(target_sigset, arg_sigset, 0);
9173                 } else {
9174                     sig.set = NULL;
9175                 }
9176             } else {
9177                 sig_ptr = NULL;
9178             }
9179 
9180             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9181                                           ts_ptr, sig_ptr));
9182 
9183             if (!is_error(ret)) {
9184                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9185                     return -TARGET_EFAULT;
9186                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9187                     return -TARGET_EFAULT;
9188                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9189                     return -TARGET_EFAULT;
9190 
9191                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9192                     return -TARGET_EFAULT;
9193             }
9194         }
9195         return ret;
9196 #endif
9197 #ifdef TARGET_NR_symlink
9198     case TARGET_NR_symlink:
9199         {
9200             void *p2;
9201             p = lock_user_string(arg1);
9202             p2 = lock_user_string(arg2);
9203             if (!p || !p2)
9204                 ret = -TARGET_EFAULT;
9205             else
9206                 ret = get_errno(symlink(p, p2));
9207             unlock_user(p2, arg2, 0);
9208             unlock_user(p, arg1, 0);
9209         }
9210         return ret;
9211 #endif
9212 #if defined(TARGET_NR_symlinkat)
9213     case TARGET_NR_symlinkat:
9214         {
9215             void *p2;
9216             p  = lock_user_string(arg1);
9217             p2 = lock_user_string(arg3);
9218             if (!p || !p2)
9219                 ret = -TARGET_EFAULT;
9220             else
9221                 ret = get_errno(symlinkat(p, arg2, p2));
9222             unlock_user(p2, arg3, 0);
9223             unlock_user(p, arg1, 0);
9224         }
9225         return ret;
9226 #endif
9227 #ifdef TARGET_NR_readlink
9228     case TARGET_NR_readlink:
9229         {
9230             void *p2;
9231             p = lock_user_string(arg1);
9232             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9233             if (!p || !p2) {
9234                 ret = -TARGET_EFAULT;
9235             } else if (!arg3) {
9236                 /* Short circuit this for the magic exe check. */
9237                 ret = -TARGET_EINVAL;
9238             } else if (is_proc_myself((const char *)p, "exe")) {
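                     /*
                      * In short: readlink("/proc/self/exe") should name the
                      * emulated binary, not QEMU itself, so resolve the
                      * recorded exec_path instead of asking the host.
                      */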
9239                 char real[PATH_MAX], *temp;
9240                 temp = realpath(exec_path, real);
9241                 /* Return value is # of bytes that we wrote to the buffer. */
9242                 if (temp == NULL) {
9243                     ret = get_errno(-1);
9244                 } else {
9245                     /* Don't worry about sign mismatch as earlier mapping
9246                      * logic would have thrown a bad address error. */
9247                     ret = MIN(strlen(real), arg3);
9248                     /* We cannot NUL terminate the string. */
9249                     memcpy(p2, real, ret);
9250                 }
9251             } else {
9252                 ret = get_errno(readlink(path(p), p2, arg3));
9253             }
9254             unlock_user(p2, arg2, ret);
9255             unlock_user(p, arg1, 0);
9256         }
9257         return ret;
9258 #endif
9259 #if defined(TARGET_NR_readlinkat)
9260     case TARGET_NR_readlinkat:
9261         {
9262             void *p2;
9263             p  = lock_user_string(arg2);
9264             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9265             if (!p || !p2) {
9266                 ret = -TARGET_EFAULT;
9267             } else if (is_proc_myself((const char *)p, "exe")) {
9268                 char real[PATH_MAX], *temp;
9269                 temp = realpath(exec_path, real);
9270                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9271                 snprintf((char *)p2, arg4, "%s", real);
9272             } else {
9273                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9274             }
9275             unlock_user(p2, arg3, ret);
9276             unlock_user(p, arg2, 0);
9277         }
9278         return ret;
9279 #endif
9280 #ifdef TARGET_NR_swapon
9281     case TARGET_NR_swapon:
9282         if (!(p = lock_user_string(arg1)))
9283             return -TARGET_EFAULT;
9284         ret = get_errno(swapon(p, arg2));
9285         unlock_user(p, arg1, 0);
9286         return ret;
9287 #endif
9288     case TARGET_NR_reboot:
9289         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9290            /* arg4 must be ignored in all other cases */
9291            p = lock_user_string(arg4);
9292            if (!p) {
9293                return -TARGET_EFAULT;
9294            }
9295            ret = get_errno(reboot(arg1, arg2, arg3, p));
9296            unlock_user(p, arg4, 0);
9297         } else {
9298            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9299         }
9300         return ret;
9301 #ifdef TARGET_NR_mmap
9302     case TARGET_NR_mmap:
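             /*
              * On the 32-bit ABIs listed below, the guest's legacy mmap()
              * passes a single pointer to a block of six arguments instead
              * of six register arguments (a sketch of the old
              * "mmap_arg_struct" convention), so unpack that block first.
              */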
9303 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9304     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9305     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9306     || defined(TARGET_S390X)
9307         {
9308             abi_ulong *v;
9309             abi_ulong v1, v2, v3, v4, v5, v6;
9310             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9311                 return -TARGET_EFAULT;
9312             v1 = tswapal(v[0]);
9313             v2 = tswapal(v[1]);
9314             v3 = tswapal(v[2]);
9315             v4 = tswapal(v[3]);
9316             v5 = tswapal(v[4]);
9317             v6 = tswapal(v[5]);
9318             unlock_user(v, arg1, 0);
9319             ret = get_errno(target_mmap(v1, v2, v3,
9320                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9321                                         v5, v6));
9322         }
9323 #else
9324         ret = get_errno(target_mmap(arg1, arg2, arg3,
9325                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9326                                     arg5,
9327                                     arg6));
9328 #endif
9329         return ret;
9330 #endif
9331 #ifdef TARGET_NR_mmap2
9332     case TARGET_NR_mmap2:
9333 #ifndef MMAP_SHIFT
9334 #define MMAP_SHIFT 12
9335 #endif
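             /*
              * mmap2() passes the file offset in units of 2^MMAP_SHIFT
              * bytes (4096 on most targets), so scale it back to a byte
              * offset before handing it to target_mmap().
              */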
9336         ret = target_mmap(arg1, arg2, arg3,
9337                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9338                           arg5, arg6 << MMAP_SHIFT);
9339         return get_errno(ret);
9340 #endif
9341     case TARGET_NR_munmap:
9342         return get_errno(target_munmap(arg1, arg2));
9343     case TARGET_NR_mprotect:
9344         {
9345             TaskState *ts = cpu->opaque;
9346             /* Special hack to detect libc making the stack executable.  */
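                 /*
                  * PROT_GROWSDOWN asks the kernel to extend the change down
                  * to the start of the (growsdown) stack mapping; since we
                  * track the guest stack bounds ourselves, emulate it
                  * roughly by widening the range down to stack_limit.
                  */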
9347             if ((arg3 & PROT_GROWSDOWN)
9348                 && arg1 >= ts->info->stack_limit
9349                 && arg1 <= ts->info->start_stack) {
9350                 arg3 &= ~PROT_GROWSDOWN;
9351                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9352                 arg1 = ts->info->stack_limit;
9353             }
9354         }
9355         return get_errno(target_mprotect(arg1, arg2, arg3));
9356 #ifdef TARGET_NR_mremap
9357     case TARGET_NR_mremap:
9358         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9359 #endif
9360         /* ??? msync/mlock/munlock are broken for softmmu.  */
9361 #ifdef TARGET_NR_msync
9362     case TARGET_NR_msync:
9363         return get_errno(msync(g2h(arg1), arg2, arg3));
9364 #endif
9365 #ifdef TARGET_NR_mlock
9366     case TARGET_NR_mlock:
9367         return get_errno(mlock(g2h(arg1), arg2));
9368 #endif
9369 #ifdef TARGET_NR_munlock
9370     case TARGET_NR_munlock:
9371         return get_errno(munlock(g2h(arg1), arg2));
9372 #endif
9373 #ifdef TARGET_NR_mlockall
9374     case TARGET_NR_mlockall:
9375         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9376 #endif
9377 #ifdef TARGET_NR_munlockall
9378     case TARGET_NR_munlockall:
9379         return get_errno(munlockall());
9380 #endif
9381 #ifdef TARGET_NR_truncate
9382     case TARGET_NR_truncate:
9383         if (!(p = lock_user_string(arg1)))
9384             return -TARGET_EFAULT;
9385         ret = get_errno(truncate(p, arg2));
9386         unlock_user(p, arg1, 0);
9387         return ret;
9388 #endif
9389 #ifdef TARGET_NR_ftruncate
9390     case TARGET_NR_ftruncate:
9391         return get_errno(ftruncate(arg1, arg2));
9392 #endif
9393     case TARGET_NR_fchmod:
9394         return get_errno(fchmod(arg1, arg2));
9395 #if defined(TARGET_NR_fchmodat)
9396     case TARGET_NR_fchmodat:
9397         if (!(p = lock_user_string(arg2)))
9398             return -TARGET_EFAULT;
9399         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9400         unlock_user(p, arg2, 0);
9401         return ret;
9402 #endif
9403     case TARGET_NR_getpriority:
9404         /* Note that negative values are valid for getpriority, so we must
9405            differentiate based on errno settings.  */
9406         errno = 0;
9407         ret = getpriority(arg1, arg2);
9408         if (ret == -1 && errno != 0) {
9409             return -host_to_target_errno(errno);
9410         }
9411 #ifdef TARGET_ALPHA
9412         /* Return value is the unbiased priority.  Signal no error.  */
9413         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9414 #else
9415         /* Return value is a biased priority to avoid negative numbers.  */
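             /* e.g. a host nice value of -5 is reported to the guest as 25. */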
9416         ret = 20 - ret;
9417 #endif
9418         return ret;
9419     case TARGET_NR_setpriority:
9420         return get_errno(setpriority(arg1, arg2, arg3));
9421 #ifdef TARGET_NR_statfs
9422     case TARGET_NR_statfs:
9423         if (!(p = lock_user_string(arg1))) {
9424             return -TARGET_EFAULT;
9425         }
9426         ret = get_errno(statfs(path(p), &stfs));
9427         unlock_user(p, arg1, 0);
9428     convert_statfs:
9429         if (!is_error(ret)) {
9430             struct target_statfs *target_stfs;
9431 
9432             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9433                 return -TARGET_EFAULT;
9434             __put_user(stfs.f_type, &target_stfs->f_type);
9435             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9436             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9437             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9438             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9439             __put_user(stfs.f_files, &target_stfs->f_files);
9440             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9441             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9442             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9443             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9444             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9445 #ifdef _STATFS_F_FLAGS
9446             __put_user(stfs.f_flags, &target_stfs->f_flags);
9447 #else
9448             __put_user(0, &target_stfs->f_flags);
9449 #endif
9450             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9451             unlock_user_struct(target_stfs, arg2, 1);
9452         }
9453         return ret;
9454 #endif
9455 #ifdef TARGET_NR_fstatfs
9456     case TARGET_NR_fstatfs:
9457         ret = get_errno(fstatfs(arg1, &stfs));
9458         goto convert_statfs;
9459 #endif
9460 #ifdef TARGET_NR_statfs64
9461     case TARGET_NR_statfs64:
9462         if (!(p = lock_user_string(arg1))) {
9463             return -TARGET_EFAULT;
9464         }
9465         ret = get_errno(statfs(path(p), &stfs));
9466         unlock_user(p, arg1, 0);
9467     convert_statfs64:
9468         if (!is_error(ret)) {
9469             struct target_statfs64 *target_stfs;
9470 
9471             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9472                 return -TARGET_EFAULT;
9473             __put_user(stfs.f_type, &target_stfs->f_type);
9474             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9475             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9476             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9477             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9478             __put_user(stfs.f_files, &target_stfs->f_files);
9479             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9480             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9481             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9482             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9483             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9484             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9485             unlock_user_struct(target_stfs, arg3, 1);
9486         }
9487         return ret;
9488     case TARGET_NR_fstatfs64:
9489         ret = get_errno(fstatfs(arg1, &stfs));
9490         goto convert_statfs64;
9491 #endif
9492 #ifdef TARGET_NR_socketcall
9493     case TARGET_NR_socketcall:
9494         return do_socketcall(arg1, arg2);
9495 #endif
9496 #ifdef TARGET_NR_accept
9497     case TARGET_NR_accept:
9498         return do_accept4(arg1, arg2, arg3, 0);
9499 #endif
9500 #ifdef TARGET_NR_accept4
9501     case TARGET_NR_accept4:
9502         return do_accept4(arg1, arg2, arg3, arg4);
9503 #endif
9504 #ifdef TARGET_NR_bind
9505     case TARGET_NR_bind:
9506         return do_bind(arg1, arg2, arg3);
9507 #endif
9508 #ifdef TARGET_NR_connect
9509     case TARGET_NR_connect:
9510         return do_connect(arg1, arg2, arg3);
9511 #endif
9512 #ifdef TARGET_NR_getpeername
9513     case TARGET_NR_getpeername:
9514         return do_getpeername(arg1, arg2, arg3);
9515 #endif
9516 #ifdef TARGET_NR_getsockname
9517     case TARGET_NR_getsockname:
9518         return do_getsockname(arg1, arg2, arg3);
9519 #endif
9520 #ifdef TARGET_NR_getsockopt
9521     case TARGET_NR_getsockopt:
9522         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9523 #endif
9524 #ifdef TARGET_NR_listen
9525     case TARGET_NR_listen:
9526         return get_errno(listen(arg1, arg2));
9527 #endif
9528 #ifdef TARGET_NR_recv
9529     case TARGET_NR_recv:
9530         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9531 #endif
9532 #ifdef TARGET_NR_recvfrom
9533     case TARGET_NR_recvfrom:
9534         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9535 #endif
9536 #ifdef TARGET_NR_recvmsg
9537     case TARGET_NR_recvmsg:
9538         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9539 #endif
9540 #ifdef TARGET_NR_send
9541     case TARGET_NR_send:
9542         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9543 #endif
9544 #ifdef TARGET_NR_sendmsg
9545     case TARGET_NR_sendmsg:
9546         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9547 #endif
9548 #ifdef TARGET_NR_sendmmsg
9549     case TARGET_NR_sendmmsg:
9550         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9551 #endif
9552 #ifdef TARGET_NR_recvmmsg
9553     case TARGET_NR_recvmmsg:
9554         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9555 #endif
9556 #ifdef TARGET_NR_sendto
9557     case TARGET_NR_sendto:
9558         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9559 #endif
9560 #ifdef TARGET_NR_shutdown
9561     case TARGET_NR_shutdown:
9562         return get_errno(shutdown(arg1, arg2));
9563 #endif
9564 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9565     case TARGET_NR_getrandom:
9566         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9567         if (!p) {
9568             return -TARGET_EFAULT;
9569         }
9570         ret = get_errno(getrandom(p, arg2, arg3));
9571         unlock_user(p, arg1, ret);
9572         return ret;
9573 #endif
9574 #ifdef TARGET_NR_socket
9575     case TARGET_NR_socket:
9576         return do_socket(arg1, arg2, arg3);
9577 #endif
9578 #ifdef TARGET_NR_socketpair
9579     case TARGET_NR_socketpair:
9580         return do_socketpair(arg1, arg2, arg3, arg4);
9581 #endif
9582 #ifdef TARGET_NR_setsockopt
9583     case TARGET_NR_setsockopt:
9584         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9585 #endif
9586 #if defined(TARGET_NR_syslog)
9587     case TARGET_NR_syslog:
9588         {
9589             int len = arg3;
9590 
9591             switch (arg1) {
9592             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9593             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9594             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9595             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9596             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9597             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9598             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9599             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9600                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9601             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9602             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9603             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9604                 {
9605                     if (len < 0) {
9606                         return -TARGET_EINVAL;
9607                     }
9608                     if (len == 0) {
9609                         return 0;
9610                     }
9611                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9612                     if (!p) {
9613                         return -TARGET_EFAULT;
9614                     }
9615                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9616                     unlock_user(p, arg2, arg3);
9617                 }
9618                 return ret;
9619             default:
9620                 return -TARGET_EINVAL;
9621             }
9622         }
9623         break;
9624 #endif
9625     case TARGET_NR_setitimer:
9626         {
9627             struct itimerval value, ovalue, *pvalue;
9628 
9629             if (arg2) {
9630                 pvalue = &value;
9631                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9632                     || copy_from_user_timeval(&pvalue->it_value,
9633                                               arg2 + sizeof(struct target_timeval)))
9634                     return -TARGET_EFAULT;
9635             } else {
9636                 pvalue = NULL;
9637             }
9638             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9639             if (!is_error(ret) && arg3) {
9640                 if (copy_to_user_timeval(arg3,
9641                                          &ovalue.it_interval)
9642                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9643                                             &ovalue.it_value))
9644                     return -TARGET_EFAULT;
9645             }
9646         }
9647         return ret;
9648     case TARGET_NR_getitimer:
9649         {
9650             struct itimerval value;
9651 
9652             ret = get_errno(getitimer(arg1, &value));
9653             if (!is_error(ret) && arg2) {
9654                 if (copy_to_user_timeval(arg2,
9655                                          &value.it_interval)
9656                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9657                                             &value.it_value))
9658                     return -TARGET_EFAULT;
9659             }
9660         }
9661         return ret;
9662 #ifdef TARGET_NR_stat
9663     case TARGET_NR_stat:
9664         if (!(p = lock_user_string(arg1))) {
9665             return -TARGET_EFAULT;
9666         }
9667         ret = get_errno(stat(path(p), &st));
9668         unlock_user(p, arg1, 0);
9669         goto do_stat;
9670 #endif
9671 #ifdef TARGET_NR_lstat
9672     case TARGET_NR_lstat:
9673         if (!(p = lock_user_string(arg1))) {
9674             return -TARGET_EFAULT;
9675         }
9676         ret = get_errno(lstat(path(p), &st));
9677         unlock_user(p, arg1, 0);
9678         goto do_stat;
9679 #endif
9680 #ifdef TARGET_NR_fstat
9681     case TARGET_NR_fstat:
9682         {
9683             ret = get_errno(fstat(arg1, &st));
9684 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9685         do_stat:
9686 #endif
9687             if (!is_error(ret)) {
9688                 struct target_stat *target_st;
9689 
9690                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9691                     return -TARGET_EFAULT;
9692                 memset(target_st, 0, sizeof(*target_st));
9693                 __put_user(st.st_dev, &target_st->st_dev);
9694                 __put_user(st.st_ino, &target_st->st_ino);
9695                 __put_user(st.st_mode, &target_st->st_mode);
9696                 __put_user(st.st_uid, &target_st->st_uid);
9697                 __put_user(st.st_gid, &target_st->st_gid);
9698                 __put_user(st.st_nlink, &target_st->st_nlink);
9699                 __put_user(st.st_rdev, &target_st->st_rdev);
9700                 __put_user(st.st_size, &target_st->st_size);
9701                 __put_user(st.st_blksize, &target_st->st_blksize);
9702                 __put_user(st.st_blocks, &target_st->st_blocks);
9703                 __put_user(st.st_atime, &target_st->target_st_atime);
9704                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9705                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9706 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9707     defined(TARGET_STAT_HAVE_NSEC)
9708                 __put_user(st.st_atim.tv_nsec,
9709                            &target_st->target_st_atime_nsec);
9710                 __put_user(st.st_mtim.tv_nsec,
9711                            &target_st->target_st_mtime_nsec);
9712                 __put_user(st.st_ctim.tv_nsec,
9713                            &target_st->target_st_ctime_nsec);
9714 #endif
9715                 unlock_user_struct(target_st, arg2, 1);
9716             }
9717         }
9718         return ret;
9719 #endif
9720     case TARGET_NR_vhangup:
9721         return get_errno(vhangup());
9722 #ifdef TARGET_NR_syscall
9723     case TARGET_NR_syscall:
9724         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9725                           arg6, arg7, arg8, 0);
9726 #endif
9727 #if defined(TARGET_NR_wait4)
9728     case TARGET_NR_wait4:
9729         {
9730             int status;
9731             abi_long status_ptr = arg2;
9732             struct rusage rusage, *rusage_ptr;
9733             abi_ulong target_rusage = arg4;
9734             abi_long rusage_err;
9735             if (target_rusage)
9736                 rusage_ptr = &rusage;
9737             else
9738                 rusage_ptr = NULL;
9739             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9740             if (!is_error(ret)) {
9741                 if (status_ptr && ret) {
9742                     status = host_to_target_waitstatus(status);
9743                     if (put_user_s32(status, status_ptr))
9744                         return -TARGET_EFAULT;
9745                 }
9746                 if (target_rusage) {
9747                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9748                     if (rusage_err) {
9749                         ret = rusage_err;
9750                     }
9751                 }
9752             }
9753         }
9754         return ret;
9755 #endif
9756 #ifdef TARGET_NR_swapoff
9757     case TARGET_NR_swapoff:
9758         if (!(p = lock_user_string(arg1)))
9759             return -TARGET_EFAULT;
9760         ret = get_errno(swapoff(p));
9761         unlock_user(p, arg1, 0);
9762         return ret;
9763 #endif
9764     case TARGET_NR_sysinfo:
9765         {
9766             struct target_sysinfo *target_value;
9767             struct sysinfo value;
9768             ret = get_errno(sysinfo(&value));
9769             if (!is_error(ret) && arg1)
9770             {
9771                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9772                     return -TARGET_EFAULT;
9773                 __put_user(value.uptime, &target_value->uptime);
9774                 __put_user(value.loads[0], &target_value->loads[0]);
9775                 __put_user(value.loads[1], &target_value->loads[1]);
9776                 __put_user(value.loads[2], &target_value->loads[2]);
9777                 __put_user(value.totalram, &target_value->totalram);
9778                 __put_user(value.freeram, &target_value->freeram);
9779                 __put_user(value.sharedram, &target_value->sharedram);
9780                 __put_user(value.bufferram, &target_value->bufferram);
9781                 __put_user(value.totalswap, &target_value->totalswap);
9782                 __put_user(value.freeswap, &target_value->freeswap);
9783                 __put_user(value.procs, &target_value->procs);
9784                 __put_user(value.totalhigh, &target_value->totalhigh);
9785                 __put_user(value.freehigh, &target_value->freehigh);
9786                 __put_user(value.mem_unit, &target_value->mem_unit);
9787                 unlock_user_struct(target_value, arg1, 1);
9788             }
9789         }
9790         return ret;
9791 #ifdef TARGET_NR_ipc
9792     case TARGET_NR_ipc:
9793         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9794 #endif
9795 #ifdef TARGET_NR_semget
9796     case TARGET_NR_semget:
9797         return get_errno(semget(arg1, arg2, arg3));
9798 #endif
9799 #ifdef TARGET_NR_semop
9800     case TARGET_NR_semop:
9801         return do_semtimedop(arg1, arg2, arg3, 0);
9802 #endif
9803 #ifdef TARGET_NR_semtimedop
9804     case TARGET_NR_semtimedop:
9805         return do_semtimedop(arg1, arg2, arg3, arg4);
9806 #endif
9807 #ifdef TARGET_NR_semctl
9808     case TARGET_NR_semctl:
9809         return do_semctl(arg1, arg2, arg3, arg4);
9810 #endif
9811 #ifdef TARGET_NR_msgctl
9812     case TARGET_NR_msgctl:
9813         return do_msgctl(arg1, arg2, arg3);
9814 #endif
9815 #ifdef TARGET_NR_msgget
9816     case TARGET_NR_msgget:
9817         return get_errno(msgget(arg1, arg2));
9818 #endif
9819 #ifdef TARGET_NR_msgrcv
9820     case TARGET_NR_msgrcv:
9821         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9822 #endif
9823 #ifdef TARGET_NR_msgsnd
9824     case TARGET_NR_msgsnd:
9825         return do_msgsnd(arg1, arg2, arg3, arg4);
9826 #endif
9827 #ifdef TARGET_NR_shmget
9828     case TARGET_NR_shmget:
9829         return get_errno(shmget(arg1, arg2, arg3));
9830 #endif
9831 #ifdef TARGET_NR_shmctl
9832     case TARGET_NR_shmctl:
9833         return do_shmctl(arg1, arg2, arg3);
9834 #endif
9835 #ifdef TARGET_NR_shmat
9836     case TARGET_NR_shmat:
9837         return do_shmat(cpu_env, arg1, arg2, arg3);
9838 #endif
9839 #ifdef TARGET_NR_shmdt
9840     case TARGET_NR_shmdt:
9841         return do_shmdt(arg1);
9842 #endif
9843     case TARGET_NR_fsync:
9844         return get_errno(fsync(arg1));
9845     case TARGET_NR_clone:
9846         /* Linux manages to have three different orderings for its
9847          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9848          * match the kernel's CONFIG_CLONE_* settings.
9849          * Microblaze is further special in that it uses a sixth
9850          * implicit argument to clone for the TLS pointer.
9851          */
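             /*
              * For reference (a sketch, not a behaviour change): do_fork()
              * always takes (env, flags, newsp, parent_tidptr, tls,
              * child_tidptr); the #if ladder below only reorders the raw
              * syscall arguments into that fixed order.
              */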
9852 #if defined(TARGET_MICROBLAZE)
9853         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9854 #elif defined(TARGET_CLONE_BACKWARDS)
9855         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9856 #elif defined(TARGET_CLONE_BACKWARDS2)
9857         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9858 #else
9859         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9860 #endif
9861         return ret;
9862 #ifdef __NR_exit_group
9863         /* new thread calls */
9864     case TARGET_NR_exit_group:
9865         preexit_cleanup(cpu_env, arg1);
9866         return get_errno(exit_group(arg1));
9867 #endif
9868     case TARGET_NR_setdomainname:
9869         if (!(p = lock_user_string(arg1)))
9870             return -TARGET_EFAULT;
9871         ret = get_errno(setdomainname(p, arg2));
9872         unlock_user(p, arg1, 0);
9873         return ret;
9874     case TARGET_NR_uname:
9875         /* no need to transcode because we use the linux syscall */
9876         {
9877             struct new_utsname * buf;
9878 
9879             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9880                 return -TARGET_EFAULT;
9881             ret = get_errno(sys_uname(buf));
9882             if (!is_error(ret)) {
9883                 /* Overwrite the native machine name with whatever is being
9884                    emulated. */
9885                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9886                           sizeof(buf->machine));
9887                 /* Allow the user to override the reported release.  */
9888                 if (qemu_uname_release && *qemu_uname_release) {
9889                     g_strlcpy(buf->release, qemu_uname_release,
9890                               sizeof(buf->release));
9891                 }
9892             }
9893             unlock_user_struct(buf, arg1, 1);
9894         }
9895         return ret;
9896 #ifdef TARGET_I386
9897     case TARGET_NR_modify_ldt:
9898         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9899 #if !defined(TARGET_X86_64)
9900     case TARGET_NR_vm86:
9901         return do_vm86(cpu_env, arg1, arg2);
9902 #endif
9903 #endif
9904 #if defined(TARGET_NR_adjtimex)
9905     case TARGET_NR_adjtimex:
9906         {
9907             struct timex host_buf;
9908 
9909             if (target_to_host_timex(&host_buf, arg1) != 0) {
9910                 return -TARGET_EFAULT;
9911             }
9912             ret = get_errno(adjtimex(&host_buf));
9913             if (!is_error(ret)) {
9914                 if (host_to_target_timex(arg1, &host_buf) != 0) {
9915                     return -TARGET_EFAULT;
9916                 }
9917             }
9918         }
9919         return ret;
9920 #endif
9921 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9922     case TARGET_NR_clock_adjtime:
9923         {
9924             struct timex htx, *phtx = &htx;
9925 
9926             if (target_to_host_timex(phtx, arg2) != 0) {
9927                 return -TARGET_EFAULT;
9928             }
9929             ret = get_errno(clock_adjtime(arg1, phtx));
9930             if (!is_error(ret) && phtx) {
9931                 if (host_to_target_timex(arg2, phtx) != 0) {
9932                     return -TARGET_EFAULT;
9933                 }
9934             }
9935         }
9936         return ret;
9937 #endif
9938     case TARGET_NR_getpgid:
9939         return get_errno(getpgid(arg1));
9940     case TARGET_NR_fchdir:
9941         return get_errno(fchdir(arg1));
9942     case TARGET_NR_personality:
9943         return get_errno(personality(arg1));
9944 #ifdef TARGET_NR__llseek /* Not on alpha */
9945     case TARGET_NR__llseek:
9946         {
9947             int64_t res;
9948 #if !defined(__NR_llseek)
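                 /*
                  * Roughly: no host _llseek(), so reassemble the 64-bit
                  * offset from the two 32-bit halves (arg2 = high word,
                  * arg3 = low word) and use plain lseek(), which is wide
                  * enough on such hosts.
                  */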
9949             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9950             if (res == -1) {
9951                 ret = get_errno(res);
9952             } else {
9953                 ret = 0;
9954             }
9955 #else
9956             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9957 #endif
9958             if ((ret == 0) && put_user_s64(res, arg4)) {
9959                 return -TARGET_EFAULT;
9960             }
9961         }
9962         return ret;
9963 #endif
9964 #ifdef TARGET_NR_getdents
9965     case TARGET_NR_getdents:
9966 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9967 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9968         {
9969             struct target_dirent *target_dirp;
9970             struct linux_dirent *dirp;
9971             abi_long count = arg3;
9972 
9973             dirp = g_try_malloc(count);
9974             if (!dirp) {
9975                 return -TARGET_ENOMEM;
9976             }
9977 
9978             ret = get_errno(sys_getdents(arg1, dirp, count));
9979             if (!is_error(ret)) {
9980                 struct linux_dirent *de;
9981                 struct target_dirent *tde;
9982                 int len = ret;
9983                 int reclen, treclen;
9984                 int count1, tnamelen;
9985 
9986                 count1 = 0;
9987                 de = dirp;
9988                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9989                     return -TARGET_EFAULT;
9990                 tde = target_dirp;
9991                 while (len > 0) {
9992                     reclen = de->d_reclen;
9993                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9994                     assert(tnamelen >= 0);
9995                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
9996                     assert(count1 + treclen <= count);
9997                     tde->d_reclen = tswap16(treclen);
9998                     tde->d_ino = tswapal(de->d_ino);
9999                     tde->d_off = tswapal(de->d_off);
10000                     memcpy(tde->d_name, de->d_name, tnamelen);
10001                     de = (struct linux_dirent *)((char *)de + reclen);
10002                     len -= reclen;
10003                     tde = (struct target_dirent *)((char *)tde + treclen);
10004                     count1 += treclen;
10005                 }
10006                 ret = count1;
10007                 unlock_user(target_dirp, arg2, ret);
10008             }
10009             g_free(dirp);
10010         }
10011 #else
10012         {
10013             struct linux_dirent *dirp;
10014             abi_long count = arg3;
10015 
10016             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10017                 return -TARGET_EFAULT;
10018             ret = get_errno(sys_getdents(arg1, dirp, count));
10019             if (!is_error(ret)) {
10020                 struct linux_dirent *de;
10021                 int len = ret;
10022                 int reclen;
10023                 de = dirp;
10024                 while (len > 0) {
10025                     reclen = de->d_reclen;
10026                     if (reclen > len)
10027                         break;
10028                     de->d_reclen = tswap16(reclen);
10029                     tswapls(&de->d_ino);
10030                     tswapls(&de->d_off);
10031                     de = (struct linux_dirent *)((char *)de + reclen);
10032                     len -= reclen;
10033                 }
10034             }
10035             unlock_user(dirp, arg2, ret);
10036         }
10037 #endif
10038 #else
10039         /* Implement getdents in terms of getdents64 */
10040         {
10041             struct linux_dirent64 *dirp;
10042             abi_long count = arg3;
10043 
10044             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10045             if (!dirp) {
10046                 return -TARGET_EFAULT;
10047             }
10048             ret = get_errno(sys_getdents64(arg1, dirp, count));
10049             if (!is_error(ret)) {
10050                 /* Convert the dirent64 structs to target dirent.  We do this
10051                  * in-place, since we can guarantee that a target_dirent is no
10052                  * larger than a dirent64; however this means we have to be
10053                  * careful to read everything before writing in the new format.
10054                  */
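                      /*
                       * Rough layout of each converted record: d_ino, d_off,
                       * d_reclen, the NUL-terminated d_name, and d_type in
                       * the final byte of the record, hence the "namelen + 2"
                       * used for treclen below.
                       */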
10055                 struct linux_dirent64 *de;
10056                 struct target_dirent *tde;
10057                 int len = ret;
10058                 int tlen = 0;
10059 
10060                 de = dirp;
10061                 tde = (struct target_dirent *)dirp;
10062                 while (len > 0) {
10063                     int namelen, treclen;
10064                     int reclen = de->d_reclen;
10065                     uint64_t ino = de->d_ino;
10066                     int64_t off = de->d_off;
10067                     uint8_t type = de->d_type;
10068 
10069                     namelen = strlen(de->d_name);
10070                     treclen = offsetof(struct target_dirent, d_name)
10071                         + namelen + 2;
10072                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10073 
10074                     memmove(tde->d_name, de->d_name, namelen + 1);
10075                     tde->d_ino = tswapal(ino);
10076                     tde->d_off = tswapal(off);
10077                     tde->d_reclen = tswap16(treclen);
10078                     /* The target_dirent type is in what was formerly a padding
10079                      * byte at the end of the structure:
10080                      */
10081                     *(((char *)tde) + treclen - 1) = type;
10082 
10083                     de = (struct linux_dirent64 *)((char *)de + reclen);
10084                     tde = (struct target_dirent *)((char *)tde + treclen);
10085                     len -= reclen;
10086                     tlen += treclen;
10087                 }
10088                 ret = tlen;
10089             }
10090             unlock_user(dirp, arg2, ret);
10091         }
10092 #endif
10093         return ret;
10094 #endif /* TARGET_NR_getdents */
10095 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10096     case TARGET_NR_getdents64:
10097         {
10098             struct linux_dirent64 *dirp;
10099             abi_long count = arg3;
10100             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10101                 return -TARGET_EFAULT;
10102             ret = get_errno(sys_getdents64(arg1, dirp, count));
10103             if (!is_error(ret)) {
10104                 struct linux_dirent64 *de;
10105                 int len = ret;
10106                 int reclen;
10107                 de = dirp;
10108                 while (len > 0) {
10109                     reclen = de->d_reclen;
10110                     if (reclen > len)
10111                         break;
10112                     de->d_reclen = tswap16(reclen);
10113                     tswap64s((uint64_t *)&de->d_ino);
10114                     tswap64s((uint64_t *)&de->d_off);
10115                     de = (struct linux_dirent64 *)((char *)de + reclen);
10116                     len -= reclen;
10117                 }
10118             }
10119             unlock_user(dirp, arg2, ret);
10120         }
10121         return ret;
10122 #endif /* TARGET_NR_getdents64 */
10123 #if defined(TARGET_NR__newselect)
10124     case TARGET_NR__newselect:
10125         return do_select(arg1, arg2, arg3, arg4, arg5);
10126 #endif
10127 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10128 # ifdef TARGET_NR_poll
10129     case TARGET_NR_poll:
10130 # endif
10131 # ifdef TARGET_NR_ppoll
10132     case TARGET_NR_ppoll:
10133 # endif
10134         {
10135             struct target_pollfd *target_pfd;
10136             unsigned int nfds = arg2;
10137             struct pollfd *pfd;
10138             unsigned int i;
10139 
10140             pfd = NULL;
10141             target_pfd = NULL;
10142             if (nfds) {
10143                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10144                     return -TARGET_EINVAL;
10145                 }
10146 
10147                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10148                                        sizeof(struct target_pollfd) * nfds, 1);
10149                 if (!target_pfd) {
10150                     return -TARGET_EFAULT;
10151                 }
10152 
10153                 pfd = alloca(sizeof(struct pollfd) * nfds);
10154                 for (i = 0; i < nfds; i++) {
10155                     pfd[i].fd = tswap32(target_pfd[i].fd);
10156                     pfd[i].events = tswap16(target_pfd[i].events);
10157                 }
10158             }
10159 
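                  /*
                   * In short: both poll() and ppoll() are funnelled through
                   * the host ppoll() (via safe_ppoll); plain poll() just
                   * converts its millisecond timeout into a timespec below.
                   */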
10160             switch (num) {
10161 # ifdef TARGET_NR_ppoll
10162             case TARGET_NR_ppoll:
10163             {
10164                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10165                 target_sigset_t *target_set;
10166                 sigset_t _set, *set = &_set;
10167 
10168                 if (arg3) {
10169                     if (target_to_host_timespec(timeout_ts, arg3)) {
10170                         unlock_user(target_pfd, arg1, 0);
10171                         return -TARGET_EFAULT;
10172                     }
10173                 } else {
10174                     timeout_ts = NULL;
10175                 }
10176 
10177                 if (arg4) {
10178                     if (arg5 != sizeof(target_sigset_t)) {
10179                         unlock_user(target_pfd, arg1, 0);
10180                         return -TARGET_EINVAL;
10181                     }
10182 
10183                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10184                     if (!target_set) {
10185                         unlock_user(target_pfd, arg1, 0);
10186                         return -TARGET_EFAULT;
10187                     }
10188                     target_to_host_sigset(set, target_set);
10189                 } else {
10190                     set = NULL;
10191                 }
10192 
10193                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10194                                            set, SIGSET_T_SIZE));
10195 
10196                 if (!is_error(ret) && arg3) {
10197                     host_to_target_timespec(arg3, timeout_ts);
10198                 }
10199                 if (arg4) {
10200                     unlock_user(target_set, arg4, 0);
10201                 }
10202                 break;
10203             }
10204 # endif
10205 # ifdef TARGET_NR_poll
10206             case TARGET_NR_poll:
10207             {
10208                 struct timespec ts, *pts;
10209 
10210                 if (arg3 >= 0) {
10211                     /* Convert ms to secs, ns */
10212                     ts.tv_sec = arg3 / 1000;
10213                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10214                     pts = &ts;
10215                 } else {
10216                     /* -ve poll() timeout means "infinite" */
10217                     pts = NULL;
10218                 }
10219                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10220                 break;
10221             }
10222 # endif
10223             default:
10224                 g_assert_not_reached();
10225             }
10226 
10227             if (!is_error(ret)) {
10228                 for (i = 0; i < nfds; i++) {
10229                     target_pfd[i].revents = tswap16(pfd[i].revents);
10230                 }
10231             }
10232             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10233         }
10234         return ret;
10235 #endif
10236     case TARGET_NR_flock:
10237         /* NOTE: the flock constant seems to be the same for every
10238            Linux platform */
10239         return get_errno(safe_flock(arg1, arg2));
10240     case TARGET_NR_readv:
10241         {
10242             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10243             if (vec != NULL) {
10244                 ret = get_errno(safe_readv(arg1, vec, arg3));
10245                 unlock_iovec(vec, arg2, arg3, 1);
10246             } else {
10247                 ret = -host_to_target_errno(errno);
10248             }
10249         }
10250         return ret;
10251     case TARGET_NR_writev:
10252         {
10253             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10254             if (vec != NULL) {
10255                 ret = get_errno(safe_writev(arg1, vec, arg3));
10256                 unlock_iovec(vec, arg2, arg3, 0);
10257             } else {
10258                 ret = -host_to_target_errno(errno);
10259             }
10260         }
10261         return ret;
10262 #if defined(TARGET_NR_preadv)
10263     case TARGET_NR_preadv:
10264         {
10265             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10266             if (vec != NULL) {
10267                 unsigned long low, high;
10268 
10269                 target_to_host_low_high(arg4, arg5, &low, &high);
10270                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10271                 unlock_iovec(vec, arg2, arg3, 1);
10272             } else {
10273                 ret = -host_to_target_errno(errno);
10274            }
10275         }
10276         return ret;
10277 #endif
10278 #if defined(TARGET_NR_pwritev)
10279     case TARGET_NR_pwritev:
10280         {
10281             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10282             if (vec != NULL) {
10283                 unsigned long low, high;
10284 
10285                 target_to_host_low_high(arg4, arg5, &low, &high);
10286                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10287                 unlock_iovec(vec, arg2, arg3, 0);
10288             } else {
10289                 ret = -host_to_target_errno(errno);
10290            }
10291         }
10292         return ret;
10293 #endif
10294     case TARGET_NR_getsid:
10295         return get_errno(getsid(arg1));
10296 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10297     case TARGET_NR_fdatasync:
10298         return get_errno(fdatasync(arg1));
10299 #endif
10300 #ifdef TARGET_NR__sysctl
10301     case TARGET_NR__sysctl:
10302         /* We don't implement this, but ENOTDIR is always a safe
10303            return value. */
10304         return -TARGET_ENOTDIR;
10305 #endif
10306     case TARGET_NR_sched_getaffinity:
10307         {
10308             unsigned int mask_size;
10309             unsigned long *mask;
10310 
10311             /*
10312              * sched_getaffinity needs multiples of ulong, so we need to take
10313              * care of mismatches between target ulong and host ulong sizes.
10314              */
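                  /*
                   * e.g. a 32-bit guest asking for a 4-byte mask on a 64-bit
                   * host gets mask_size rounded up to 8, so the host syscall
                   * below always sees whole unsigned longs.
                   */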
10315             if (arg2 & (sizeof(abi_ulong) - 1)) {
10316                 return -TARGET_EINVAL;
10317             }
10318             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10319 
10320             mask = alloca(mask_size);
10321             memset(mask, 0, mask_size);
10322             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10323 
10324             if (!is_error(ret)) {
10325                 if (ret > arg2) {
10326                     /* More data returned than the caller's buffer will fit.
10327                      * This only happens if sizeof(abi_long) < sizeof(long)
10328                      * and the caller passed us a buffer holding an odd number
10329                      * of abi_longs. If the host kernel is actually using the
10330                      * extra 4 bytes then fail EINVAL; otherwise we can just
10331                      * ignore them and only copy the interesting part.
10332                      */
10333                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10334                     if (numcpus > arg2 * 8) {
10335                         return -TARGET_EINVAL;
10336                     }
10337                     ret = arg2;
10338                 }
10339 
10340                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10341                     return -TARGET_EFAULT;
10342                 }
10343             }
10344         }
10345         return ret;
10346     case TARGET_NR_sched_setaffinity:
10347         {
10348             unsigned int mask_size;
10349             unsigned long *mask;
10350 
10351             /*
10352              * sched_setaffinity needs multiples of ulong, so we need to take
10353              * care of mismatches between target ulong and host ulong sizes.
10354              */
10355             if (arg2 & (sizeof(abi_ulong) - 1)) {
10356                 return -TARGET_EINVAL;
10357             }
10358             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10359             mask = alloca(mask_size);
10360 
10361             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10362             if (ret) {
10363                 return ret;
10364             }
10365 
10366             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10367         }
10368     case TARGET_NR_getcpu:
10369         {
10370             unsigned cpu, node;
10371             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10372                                        arg2 ? &node : NULL,
10373                                        NULL));
10374             if (is_error(ret)) {
10375                 return ret;
10376             }
10377             if (arg1 && put_user_u32(cpu, arg1)) {
10378                 return -TARGET_EFAULT;
10379             }
10380             if (arg2 && put_user_u32(node, arg2)) {
10381                 return -TARGET_EFAULT;
10382             }
10383         }
10384         return ret;
10385     case TARGET_NR_sched_setparam:
10386         {
10387             struct sched_param *target_schp;
10388             struct sched_param schp;
10389 
10390             if (arg2 == 0) {
10391                 return -TARGET_EINVAL;
10392             }
10393             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10394                 return -TARGET_EFAULT;
10395             schp.sched_priority = tswap32(target_schp->sched_priority);
10396             unlock_user_struct(target_schp, arg2, 0);
10397             return get_errno(sched_setparam(arg1, &schp));
10398         }
10399     case TARGET_NR_sched_getparam:
10400         {
10401             struct sched_param *target_schp;
10402             struct sched_param schp;
10403 
10404             if (arg2 == 0) {
10405                 return -TARGET_EINVAL;
10406             }
10407             ret = get_errno(sched_getparam(arg1, &schp));
10408             if (!is_error(ret)) {
10409                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10410                     return -TARGET_EFAULT;
10411                 target_schp->sched_priority = tswap32(schp.sched_priority);
10412                 unlock_user_struct(target_schp, arg2, 1);
10413             }
10414         }
10415         return ret;
10416     case TARGET_NR_sched_setscheduler:
10417         {
10418             struct sched_param *target_schp;
10419             struct sched_param schp;
10420             if (arg3 == 0) {
10421                 return -TARGET_EINVAL;
10422             }
10423             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10424                 return -TARGET_EFAULT;
10425             schp.sched_priority = tswap32(target_schp->sched_priority);
10426             unlock_user_struct(target_schp, arg3, 0);
10427             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10428         }
10429     case TARGET_NR_sched_getscheduler:
10430         return get_errno(sched_getscheduler(arg1));
10431     case TARGET_NR_sched_yield:
10432         return get_errno(sched_yield());
10433     case TARGET_NR_sched_get_priority_max:
10434         return get_errno(sched_get_priority_max(arg1));
10435     case TARGET_NR_sched_get_priority_min:
10436         return get_errno(sched_get_priority_min(arg1));
10437 #ifdef TARGET_NR_sched_rr_get_interval
10438     case TARGET_NR_sched_rr_get_interval:
10439         {
10440             struct timespec ts;
10441             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10442             if (!is_error(ret)) {
10443                 ret = host_to_target_timespec(arg2, &ts);
10444             }
10445         }
10446         return ret;
10447 #endif
10448 #if defined(TARGET_NR_nanosleep)
10449     case TARGET_NR_nanosleep:
10450         {
10451             struct timespec req, rem;
10452             target_to_host_timespec(&req, arg1);
10453             ret = get_errno(safe_nanosleep(&req, &rem));
10454             if (is_error(ret) && arg2) {
10455                 host_to_target_timespec(arg2, &rem);
10456             }
10457         }
10458         return ret;
10459 #endif
10460     case TARGET_NR_prctl:
10461         switch (arg1) {
10462         case PR_GET_PDEATHSIG:
10463         {
10464             int deathsig;
10465             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10466             if (!is_error(ret) && arg2
10467                 && put_user_ual(deathsig, arg2)) {
10468                 return -TARGET_EFAULT;
10469             }
10470             return ret;
10471         }
10472 #ifdef PR_GET_NAME
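              /*
               * The kernel task name (comm) is at most 16 bytes including
               * the trailing NUL, which is why a fixed 16-byte guest buffer
               * is used for PR_GET_NAME / PR_SET_NAME.
               */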
10473         case PR_GET_NAME:
10474         {
10475             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10476             if (!name) {
10477                 return -TARGET_EFAULT;
10478             }
10479             ret = get_errno(prctl(arg1, (unsigned long)name,
10480                                   arg3, arg4, arg5));
10481             unlock_user(name, arg2, 16);
10482             return ret;
10483         }
10484         case PR_SET_NAME:
10485         {
10486             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10487             if (!name) {
10488                 return -TARGET_EFAULT;
10489             }
10490             ret = get_errno(prctl(arg1, (unsigned long)name,
10491                                   arg3, arg4, arg5));
10492             unlock_user(name, arg2, 0);
10493             return ret;
10494         }
10495 #endif
10496 #ifdef TARGET_MIPS
10497         case TARGET_PR_GET_FP_MODE:
10498         {
10499             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10500             ret = 0;
10501             if (env->CP0_Status & (1 << CP0St_FR)) {
10502                 ret |= TARGET_PR_FP_MODE_FR;
10503             }
10504             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10505                 ret |= TARGET_PR_FP_MODE_FRE;
10506             }
10507             return ret;
10508         }
10509         case TARGET_PR_SET_FP_MODE:
10510         {
10511             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10512             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10513             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10514             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10515             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10516 
10517             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10518                                             TARGET_PR_FP_MODE_FRE;
10519 
10520             /* If nothing to change, return right away, successfully.  */
10521             if (old_fr == new_fr && old_fre == new_fre) {
10522                 return 0;
10523             }
10524             /* Check the value is valid */
10525             if (arg2 & ~known_bits) {
10526                 return -TARGET_EOPNOTSUPP;
10527             }
10528             /* Setting FRE without FR is not supported.  */
10529             if (new_fre && !new_fr) {
10530                 return -TARGET_EOPNOTSUPP;
10531             }
10532             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10533                 /* FR1 is not supported */
10534                 return -TARGET_EOPNOTSUPP;
10535             }
10536             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10537                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10538                 /* cannot set FR=0 */
10539                 return -TARGET_EOPNOTSUPP;
10540             }
10541             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10542                 /* Cannot set FRE=1 */
10543                 return -TARGET_EOPNOTSUPP;
10544             }
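                  /*
                   * Roughly: with FR=0 a double occupies an even/odd
                   * register pair (high word in the odd register), with FR=1
                   * it sits entirely in one 64-bit register, so move the
                   * high word across to preserve double values over the
                   * mode switch.
                   */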
10545 
10546             int i;
10547             fpr_t *fpr = env->active_fpu.fpr;
10548             for (i = 0; i < 32 ; i += 2) {
10549                 if (!old_fr && new_fr) {
10550                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10551                 } else if (old_fr && !new_fr) {
10552                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10553                 }
10554             }
10555 
10556             if (new_fr) {
10557                 env->CP0_Status |= (1 << CP0St_FR);
10558                 env->hflags |= MIPS_HFLAG_F64;
10559             } else {
10560                 env->CP0_Status &= ~(1 << CP0St_FR);
10561                 env->hflags &= ~MIPS_HFLAG_F64;
10562             }
10563             if (new_fre) {
10564                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10565                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10566                     env->hflags |= MIPS_HFLAG_FRE;
10567                 }
10568             } else {
10569                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10570                 env->hflags &= ~MIPS_HFLAG_FRE;
10571             }
10572 
10573             return 0;
10574         }
10575 #endif /* MIPS */
10576 #ifdef TARGET_AARCH64
10577         case TARGET_PR_SVE_SET_VL:
10578             /*
10579              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10580              * PR_SVE_VL_INHERIT.  Note the kernel definition
10581              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10582              * even though the current architectural maximum is VQ=16.
10583              */
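                  /*
                   * Illustrative guest-side usage (not code from this file): a
                   * guest would request, say, a 256-bit vector length with
                   * prctl(PR_SVE_SET_VL, 32); the value returned below is the
                   * length actually granted, clamped to the CPU's maximum VQ.
                   */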
10584             ret = -TARGET_EINVAL;
10585             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10586                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10587                 CPUARMState *env = cpu_env;
10588                 ARMCPU *cpu = env_archcpu(env);
10589                 uint32_t vq, old_vq;
10590 
10591                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10592                 vq = MAX(arg2 / 16, 1);
10593                 vq = MIN(vq, cpu->sve_max_vq);
10594 
10595                 if (vq < old_vq) {
10596                     aarch64_sve_narrow_vq(env, vq);
10597                 }
10598                 env->vfp.zcr_el[1] = vq - 1;
10599                 arm_rebuild_hflags(env);
10600                 ret = vq * 16;
10601             }
10602             return ret;
10603         case TARGET_PR_SVE_GET_VL:
10604             ret = -TARGET_EINVAL;
10605             {
10606                 ARMCPU *cpu = env_archcpu(cpu_env);
10607                 if (cpu_isar_feature(aa64_sve, cpu)) {
10608                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10609                 }
10610             }
10611             return ret;
10612         case TARGET_PR_PAC_RESET_KEYS:
10613             {
10614                 CPUARMState *env = cpu_env;
10615                 ARMCPU *cpu = env_archcpu(env);
10616 
10617                 if (arg3 || arg4 || arg5) {
10618                     return -TARGET_EINVAL;
10619                 }
10620                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10621                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10622                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10623                                TARGET_PR_PAC_APGAKEY);
10624                     int ret = 0;
10625                     Error *err = NULL;
10626 
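                          /* As in the kernel, a key argument of 0 selects all keys. */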
10627                     if (arg2 == 0) {
10628                         arg2 = all;
10629                     } else if (arg2 & ~all) {
10630                         return -TARGET_EINVAL;
10631                     }
10632                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10633                         ret |= qemu_guest_getrandom(&env->keys.apia,
10634                                                     sizeof(ARMPACKey), &err);
10635                     }
10636                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10637                         ret |= qemu_guest_getrandom(&env->keys.apib,
10638                                                     sizeof(ARMPACKey), &err);
10639                     }
10640                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10641                         ret |= qemu_guest_getrandom(&env->keys.apda,
10642                                                     sizeof(ARMPACKey), &err);
10643                     }
10644                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10645                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10646                                                     sizeof(ARMPACKey), &err);
10647                     }
10648                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10649                         ret |= qemu_guest_getrandom(&env->keys.apga,
10650                                                     sizeof(ARMPACKey), &err);
10651                     }
10652                     if (ret != 0) {
10653                         /*
10654                          * Some unknown failure in the crypto.  The best
10655                          * we can do is log it and fail the syscall.
10656                          * The real syscall cannot fail this way.
10657                          */
10658                         qemu_log_mask(LOG_UNIMP,
10659                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10660                                       error_get_pretty(err));
10661                         error_free(err);
10662                         return -TARGET_EIO;
10663                     }
10664                     return 0;
10665                 }
10666             }
10667             return -TARGET_EINVAL;
10668 #endif /* AARCH64 */
10669         case PR_GET_SECCOMP:
10670         case PR_SET_SECCOMP:
10671             /* We don't support seccomp for the guest: allowing it could
10672              * disable syscalls that QEMU itself needs. */
10673             return -TARGET_EINVAL;
10674         default:
10675             /* Most prctl options have no pointer arguments */
10676             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10677         }
10678         break;
10679 #ifdef TARGET_NR_arch_prctl
10680     case TARGET_NR_arch_prctl:
10681         return do_arch_prctl(cpu_env, arg1, arg2);
10682 #endif
10683 #ifdef TARGET_NR_pread64
10684     case TARGET_NR_pread64:
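              /*
               * On ABIs that pass 64-bit values in aligned register pairs the
               * offset does not start until after a padding register, so shift
               * the arguments down so that arg4/arg5 always hold the two halves
               * of the offset.
               */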
10685         if (regpairs_aligned(cpu_env, num)) {
10686             arg4 = arg5;
10687             arg5 = arg6;
10688         }
10689         if (arg2 == 0 && arg3 == 0) {
10690             /* Special-case NULL buffer and zero length, which should succeed */
10691             p = 0;
10692         } else {
10693             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10694             if (!p) {
10695                 return -TARGET_EFAULT;
10696             }
10697         }
10698         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10699         unlock_user(p, arg2, ret);
10700         return ret;
10701     case TARGET_NR_pwrite64:
10702         if (regpairs_aligned(cpu_env, num)) {
10703             arg4 = arg5;
10704             arg5 = arg6;
10705         }
10706         if (arg2 == 0 && arg3 == 0) {
10707             /* Special-case NULL buffer and zero length, which should succeed */
10708             p = 0;
10709         } else {
10710             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10711             if (!p) {
10712                 return -TARGET_EFAULT;
10713             }
10714         }
10715         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10716         unlock_user(p, arg2, 0);
10717         return ret;
10718 #endif
10719     case TARGET_NR_getcwd:
10720         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10721             return -TARGET_EFAULT;
10722         ret = get_errno(sys_getcwd1(p, arg2));
10723         unlock_user(p, arg1, ret);
10724         return ret;
10725     case TARGET_NR_capget:
10726     case TARGET_NR_capset:
10727     {
10728         struct target_user_cap_header *target_header;
10729         struct target_user_cap_data *target_data = NULL;
10730         struct __user_cap_header_struct header;
10731         struct __user_cap_data_struct data[2];
10732         struct __user_cap_data_struct *dataptr = NULL;
10733         int i, target_datalen;
10734         int data_items = 1;
10735 
10736         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10737             return -TARGET_EFAULT;
10738         }
10739         header.version = tswap32(target_header->version);
10740         header.pid = tswap32(target_header->pid);
10741 
10742         if (header.version != _LINUX_CAPABILITY_VERSION) {
10743             /* Version 2 and up takes pointer to two user_data structs */
10744             data_items = 2;
10745         }
10746 
10747         target_datalen = sizeof(*target_data) * data_items;
10748 
10749         if (arg2) {
10750             if (num == TARGET_NR_capget) {
10751                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10752             } else {
10753                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10754             }
10755             if (!target_data) {
10756                 unlock_user_struct(target_header, arg1, 0);
10757                 return -TARGET_EFAULT;
10758             }
10759 
10760             if (num == TARGET_NR_capset) {
10761                 for (i = 0; i < data_items; i++) {
10762                     data[i].effective = tswap32(target_data[i].effective);
10763                     data[i].permitted = tswap32(target_data[i].permitted);
10764                     data[i].inheritable = tswap32(target_data[i].inheritable);
10765                 }
10766             }
10767 
10768             dataptr = data;
10769         }
10770 
10771         if (num == TARGET_NR_capget) {
10772             ret = get_errno(capget(&header, dataptr));
10773         } else {
10774             ret = get_errno(capset(&header, dataptr));
10775         }
10776 
10777         /* The kernel always updates version for both capget and capset */
10778         target_header->version = tswap32(header.version);
10779         unlock_user_struct(target_header, arg1, 1);
10780 
10781         if (arg2) {
10782             if (num == TARGET_NR_capget) {
10783                 for (i = 0; i < data_items; i++) {
10784                     target_data[i].effective = tswap32(data[i].effective);
10785                     target_data[i].permitted = tswap32(data[i].permitted);
10786                     target_data[i].inheritable = tswap32(data[i].inheritable);
10787                 }
10788                 unlock_user(target_data, arg2, target_datalen);
10789             } else {
10790                 unlock_user(target_data, arg2, 0);
10791             }
10792         }
10793         return ret;
10794     }
10795     case TARGET_NR_sigaltstack:
10796         return do_sigaltstack(arg1, arg2,
10797                               get_sp_from_cpustate((CPUArchState *)cpu_env));
10798 
10799 #ifdef CONFIG_SENDFILE
10800 #ifdef TARGET_NR_sendfile
10801     case TARGET_NR_sendfile:
10802     {
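              /*
               * If the guest passed an offset pointer, copy the value in, let the
               * host sendfile() update it, and write the new offset back on
               * success.
               */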
10803         off_t *offp = NULL;
10804         off_t off;
10805         if (arg3) {
10806             ret = get_user_sal(off, arg3);
10807             if (is_error(ret)) {
10808                 return ret;
10809             }
10810             offp = &off;
10811         }
10812         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10813         if (!is_error(ret) && arg3) {
10814             abi_long ret2 = put_user_sal(off, arg3);
10815             if (is_error(ret2)) {
10816                 ret = ret2;
10817             }
10818         }
10819         return ret;
10820     }
10821 #endif
10822 #ifdef TARGET_NR_sendfile64
10823     case TARGET_NR_sendfile64:
10824     {
10825         off_t *offp = NULL;
10826         off_t off;
10827         if (arg3) {
10828             ret = get_user_s64(off, arg3);
10829             if (is_error(ret)) {
10830                 return ret;
10831             }
10832             offp = &off;
10833         }
10834         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10835         if (!is_error(ret) && arg3) {
10836             abi_long ret2 = put_user_s64(off, arg3);
10837             if (is_error(ret2)) {
10838                 ret = ret2;
10839             }
10840         }
10841         return ret;
10842     }
10843 #endif
10844 #endif
10845 #ifdef TARGET_NR_vfork
10846     case TARGET_NR_vfork:
10847         return get_errno(do_fork(cpu_env,
10848                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10849                          0, 0, 0, 0));
10850 #endif
10851 #ifdef TARGET_NR_ugetrlimit
10852     case TARGET_NR_ugetrlimit:
10853     {
10854         struct rlimit rlim;
10855         int resource = target_to_host_resource(arg1);
10856         ret = get_errno(getrlimit(resource, &rlim));
10857         if (!is_error(ret)) {
10858             struct target_rlimit *target_rlim;
10859             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10860                 return -TARGET_EFAULT;
10861             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10862             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10863             unlock_user_struct(target_rlim, arg2, 1);
10864         }
10865         return ret;
10866     }
10867 #endif
10868 #ifdef TARGET_NR_truncate64
10869     case TARGET_NR_truncate64:
10870         if (!(p = lock_user_string(arg1)))
10871             return -TARGET_EFAULT;
10872         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10873         unlock_user(p, arg1, 0);
10874         return ret;
10875 #endif
10876 #ifdef TARGET_NR_ftruncate64
10877     case TARGET_NR_ftruncate64:
10878         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10879 #endif
10880 #ifdef TARGET_NR_stat64
10881     case TARGET_NR_stat64:
10882         if (!(p = lock_user_string(arg1))) {
10883             return -TARGET_EFAULT;
10884         }
10885         ret = get_errno(stat(path(p), &st));
10886         unlock_user(p, arg1, 0);
10887         if (!is_error(ret))
10888             ret = host_to_target_stat64(cpu_env, arg2, &st);
10889         return ret;
10890 #endif
10891 #ifdef TARGET_NR_lstat64
10892     case TARGET_NR_lstat64:
10893         if (!(p = lock_user_string(arg1))) {
10894             return -TARGET_EFAULT;
10895         }
10896         ret = get_errno(lstat(path(p), &st));
10897         unlock_user(p, arg1, 0);
10898         if (!is_error(ret))
10899             ret = host_to_target_stat64(cpu_env, arg2, &st);
10900         return ret;
10901 #endif
10902 #ifdef TARGET_NR_fstat64
10903     case TARGET_NR_fstat64:
10904         ret = get_errno(fstat(arg1, &st));
10905         if (!is_error(ret))
10906             ret = host_to_target_stat64(cpu_env, arg2, &st);
10907         return ret;
10908 #endif
10909 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10910 #ifdef TARGET_NR_fstatat64
10911     case TARGET_NR_fstatat64:
10912 #endif
10913 #ifdef TARGET_NR_newfstatat
10914     case TARGET_NR_newfstatat:
10915 #endif
10916         if (!(p = lock_user_string(arg2))) {
10917             return -TARGET_EFAULT;
10918         }
10919         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10920         unlock_user(p, arg2, 0);
10921         if (!is_error(ret))
10922             ret = host_to_target_stat64(cpu_env, arg3, &st);
10923         return ret;
10924 #endif
10925 #if defined(TARGET_NR_statx)
10926     case TARGET_NR_statx:
10927         {
10928             struct target_statx *target_stx;
10929             int dirfd = arg1;
10930             int flags = arg3;
10931 
10932             p = lock_user_string(arg2);
10933             if (p == NULL) {
10934                 return -TARGET_EFAULT;
10935             }
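                  /*
                   * Prefer the host statx() syscall when it is available; otherwise
                   * fall back to fstatat() below and synthesise the statx fields
                   * that can be recovered from struct stat.
                   */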
10936 #if defined(__NR_statx)
10937             {
10938                 /*
10939                  * It is assumed that struct statx is architecture independent.
10940                  */
10941                 struct target_statx host_stx;
10942                 int mask = arg4;
10943 
10944                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10945                 if (!is_error(ret)) {
10946                     if (host_to_target_statx(&host_stx, arg5) != 0) {
10947                         unlock_user(p, arg2, 0);
10948                         return -TARGET_EFAULT;
10949                     }
10950                 }
10951 
10952                 if (ret != -TARGET_ENOSYS) {
10953                     unlock_user(p, arg2, 0);
10954                     return ret;
10955                 }
10956             }
10957 #endif
10958             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10959             unlock_user(p, arg2, 0);
10960 
10961             if (!is_error(ret)) {
10962                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10963                     return -TARGET_EFAULT;
10964                 }
10965                 memset(target_stx, 0, sizeof(*target_stx));
10966                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10967                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10968                 __put_user(st.st_ino, &target_stx->stx_ino);
10969                 __put_user(st.st_mode, &target_stx->stx_mode);
10970                 __put_user(st.st_uid, &target_stx->stx_uid);
10971                 __put_user(st.st_gid, &target_stx->stx_gid);
10972                 __put_user(st.st_nlink, &target_stx->stx_nlink);
10973                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10974                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10975                 __put_user(st.st_size, &target_stx->stx_size);
10976                 __put_user(st.st_blksize, &target_stx->stx_blksize);
10977                 __put_user(st.st_blocks, &target_stx->stx_blocks);
10978                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10979                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10980                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10981                 unlock_user_struct(target_stx, arg5, 1);
10982             }
10983         }
10984         return ret;
10985 #endif
10986 #ifdef TARGET_NR_lchown
10987     case TARGET_NR_lchown:
10988         if (!(p = lock_user_string(arg1)))
10989             return -TARGET_EFAULT;
10990         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10991         unlock_user(p, arg1, 0);
10992         return ret;
10993 #endif
10994 #ifdef TARGET_NR_getuid
10995     case TARGET_NR_getuid:
10996         return get_errno(high2lowuid(getuid()));
10997 #endif
10998 #ifdef TARGET_NR_getgid
10999     case TARGET_NR_getgid:
11000         return get_errno(high2lowgid(getgid()));
11001 #endif
11002 #ifdef TARGET_NR_geteuid
11003     case TARGET_NR_geteuid:
11004         return get_errno(high2lowuid(geteuid()));
11005 #endif
11006 #ifdef TARGET_NR_getegid
11007     case TARGET_NR_getegid:
11008         return get_errno(high2lowgid(getegid()));
11009 #endif
11010     case TARGET_NR_setreuid:
11011         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11012     case TARGET_NR_setregid:
11013         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11014     case TARGET_NR_getgroups:
11015         {
11016             int gidsetsize = arg1;
11017             target_id *target_grouplist;
11018             gid_t *grouplist;
11019             int i;
11020 
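                  /* gidsetsize == 0 queries the number of groups; nothing is copied out. */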
11021             grouplist = alloca(gidsetsize * sizeof(gid_t));
11022             ret = get_errno(getgroups(gidsetsize, grouplist));
11023             if (gidsetsize == 0)
11024                 return ret;
11025             if (!is_error(ret)) {
11026                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11027                 if (!target_grouplist)
11028                     return -TARGET_EFAULT;
11029                 for (i = 0; i < ret; i++)
11030                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11031                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11032             }
11033         }
11034         return ret;
11035     case TARGET_NR_setgroups:
11036         {
11037             int gidsetsize = arg1;
11038             target_id *target_grouplist;
11039             gid_t *grouplist = NULL;
11040             int i;
11041             if (gidsetsize) {
11042                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11043                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11044                 if (!target_grouplist) {
11045                     return -TARGET_EFAULT;
11046                 }
11047                 for (i = 0; i < gidsetsize; i++) {
11048                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11049                 }
11050                 unlock_user(target_grouplist, arg2, 0);
11051             }
11052             return get_errno(setgroups(gidsetsize, grouplist));
11053         }
11054     case TARGET_NR_fchown:
11055         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11056 #if defined(TARGET_NR_fchownat)
11057     case TARGET_NR_fchownat:
11058         if (!(p = lock_user_string(arg2)))
11059             return -TARGET_EFAULT;
11060         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11061                                  low2highgid(arg4), arg5));
11062         unlock_user(p, arg2, 0);
11063         return ret;
11064 #endif
11065 #ifdef TARGET_NR_setresuid
11066     case TARGET_NR_setresuid:
11067         return get_errno(sys_setresuid(low2highuid(arg1),
11068                                        low2highuid(arg2),
11069                                        low2highuid(arg3)));
11070 #endif
11071 #ifdef TARGET_NR_getresuid
11072     case TARGET_NR_getresuid:
11073         {
11074             uid_t ruid, euid, suid;
11075             ret = get_errno(getresuid(&ruid, &euid, &suid));
11076             if (!is_error(ret)) {
11077                 if (put_user_id(high2lowuid(ruid), arg1)
11078                     || put_user_id(high2lowuid(euid), arg2)
11079                     || put_user_id(high2lowuid(suid), arg3))
11080                     return -TARGET_EFAULT;
11081             }
11082         }
11083         return ret;
11084 #endif
11085 #ifdef TARGET_NR_getresgid
11086     case TARGET_NR_setresgid:
11087         return get_errno(sys_setresgid(low2highgid(arg1),
11088                                        low2highgid(arg2),
11089                                        low2highgid(arg3)));
11090 #endif
11091 #ifdef TARGET_NR_getresgid
11092     case TARGET_NR_getresgid:
11093         {
11094             gid_t rgid, egid, sgid;
11095             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11096             if (!is_error(ret)) {
11097                 if (put_user_id(high2lowgid(rgid), arg1)
11098                     || put_user_id(high2lowgid(egid), arg2)
11099                     || put_user_id(high2lowgid(sgid), arg3))
11100                     return -TARGET_EFAULT;
11101             }
11102         }
11103         return ret;
11104 #endif
11105 #ifdef TARGET_NR_chown
11106     case TARGET_NR_chown:
11107         if (!(p = lock_user_string(arg1)))
11108             return -TARGET_EFAULT;
11109         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11110         unlock_user(p, arg1, 0);
11111         return ret;
11112 #endif
11113     case TARGET_NR_setuid:
11114         return get_errno(sys_setuid(low2highuid(arg1)));
11115     case TARGET_NR_setgid:
11116         return get_errno(sys_setgid(low2highgid(arg1)));
11117     case TARGET_NR_setfsuid:
11118         return get_errno(setfsuid(arg1));
11119     case TARGET_NR_setfsgid:
11120         return get_errno(setfsgid(arg1));
11121 
11122 #ifdef TARGET_NR_lchown32
11123     case TARGET_NR_lchown32:
11124         if (!(p = lock_user_string(arg1)))
11125             return -TARGET_EFAULT;
11126         ret = get_errno(lchown(p, arg2, arg3));
11127         unlock_user(p, arg1, 0);
11128         return ret;
11129 #endif
11130 #ifdef TARGET_NR_getuid32
11131     case TARGET_NR_getuid32:
11132         return get_errno(getuid());
11133 #endif
11134 
11135 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11136    /* Alpha specific */
11137     case TARGET_NR_getxuid:
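              /* The real uid is returned in v0; the effective uid goes back in a4. */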
11138         {
11139             uid_t euid;
11140             euid = geteuid();
11141             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11142         }
11143         return get_errno(getuid());
11144 #endif
11145 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11146    /* Alpha specific */
11147     case TARGET_NR_getxgid:
11148         {
11149             gid_t egid;
11150             egid = getegid();
11151             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11152         }
11153         return get_errno(getgid());
11154 #endif
11155 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11156     /* Alpha specific */
11157     case TARGET_NR_osf_getsysinfo:
11158         ret = -TARGET_EOPNOTSUPP;
11159         switch (arg1) {
11160           case TARGET_GSI_IEEE_FP_CONTROL:
11161             {
11162                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11163                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11164 
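                      /* Fold the live FPCR status bits into the saved software control word. */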
11165                 swcr &= ~SWCR_STATUS_MASK;
11166                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11167 
11168                 if (put_user_u64 (swcr, arg2))
11169                         return -TARGET_EFAULT;
11170                 ret = 0;
11171             }
11172             break;
11173 
11174           /* case GSI_IEEE_STATE_AT_SIGNAL:
11175              -- Not implemented in linux kernel.
11176              case GSI_UACPROC:
11177              -- Retrieves current unaligned access state; not much used.
11178              case GSI_PROC_TYPE:
11179              -- Retrieves implver information; surely not used.
11180              case GSI_GET_HWRPB:
11181              -- Grabs a copy of the HWRPB; surely not used.
11182           */
11183         }
11184         return ret;
11185 #endif
11186 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11187     /* Alpha specific */
11188     case TARGET_NR_osf_setsysinfo:
11189         ret = -TARGET_EOPNOTSUPP;
11190         switch (arg1) {
11191           case TARGET_SSI_IEEE_FP_CONTROL:
11192             {
11193                 uint64_t swcr, fpcr;
11194 
11195                 if (get_user_u64 (swcr, arg2)) {
11196                     return -TARGET_EFAULT;
11197                 }
11198 
11199                 /*
11200                  * The kernel calls swcr_update_status to update the
11201                  * status bits from the fpcr at every point that it
11202                  * could be queried.  Therefore, we store the status
11203                  * bits only in FPCR.
11204                  */
11205                 ((CPUAlphaState *)cpu_env)->swcr
11206                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11207 
11208                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11209                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11210                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11211                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11212                 ret = 0;
11213             }
11214             break;
11215 
11216           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11217             {
11218                 uint64_t exc, fpcr, fex;
11219 
11220                 if (get_user_u64(exc, arg2)) {
11221                     return -TARGET_EFAULT;
11222                 }
11223                 exc &= SWCR_STATUS_MASK;
11224                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11225 
11226                 /* Old exceptions are not signaled.  */
11227                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11228                 fex = exc & ~fex;
11229                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11230                 fex &= ((CPUArchState *)cpu_env)->swcr;
11231 
11232                 /* Update the hardware fpcr.  */
11233                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11234                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11235 
11236                 if (fex) {
11237                     int si_code = TARGET_FPE_FLTUNK;
11238                     target_siginfo_t info;
11239 
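                          /* Later tests overwrite si_code, so the last matching exception bit wins. */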
11240                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11241                         si_code = TARGET_FPE_FLTUND;
11242                     }
11243                     if (fex & SWCR_TRAP_ENABLE_INE) {
11244                         si_code = TARGET_FPE_FLTRES;
11245                     }
11246                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11247                         si_code = TARGET_FPE_FLTUND;
11248                     }
11249                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11250                         si_code = TARGET_FPE_FLTOVF;
11251                     }
11252                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11253                         si_code = TARGET_FPE_FLTDIV;
11254                     }
11255                     if (fex & SWCR_TRAP_ENABLE_INV) {
11256                         si_code = TARGET_FPE_FLTINV;
11257                     }
11258 
11259                     info.si_signo = SIGFPE;
11260                     info.si_errno = 0;
11261                     info.si_code = si_code;
11262                     info._sifields._sigfault._addr
11263                         = ((CPUArchState *)cpu_env)->pc;
11264                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11265                                  QEMU_SI_FAULT, &info);
11266                 }
11267                 ret = 0;
11268             }
11269             break;
11270 
11271           /* case SSI_NVPAIRS:
11272              -- Used with SSIN_UACPROC to enable unaligned accesses.
11273              case SSI_IEEE_STATE_AT_SIGNAL:
11274              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11275              -- Not implemented in linux kernel
11276           */
11277         }
11278         return ret;
11279 #endif
11280 #ifdef TARGET_NR_osf_sigprocmask
11281     /* Alpha specific.  */
11282     case TARGET_NR_osf_sigprocmask:
11283         {
11284             abi_ulong mask;
11285             int how;
11286             sigset_t set, oldset;
11287 
11288             switch (arg1) {
11289             case TARGET_SIG_BLOCK:
11290                 how = SIG_BLOCK;
11291                 break;
11292             case TARGET_SIG_UNBLOCK:
11293                 how = SIG_UNBLOCK;
11294                 break;
11295             case TARGET_SIG_SETMASK:
11296                 how = SIG_SETMASK;
11297                 break;
11298             default:
11299                 return -TARGET_EINVAL;
11300             }
11301             mask = arg2;
11302             target_to_host_old_sigset(&set, &mask);
11303             ret = do_sigprocmask(how, &set, &oldset);
11304             if (!ret) {
11305                 host_to_target_old_sigset(&mask, &oldset);
11306                 ret = mask;
11307             }
11308         }
11309         return ret;
11310 #endif
11311 
11312 #ifdef TARGET_NR_getgid32
11313     case TARGET_NR_getgid32:
11314         return get_errno(getgid());
11315 #endif
11316 #ifdef TARGET_NR_geteuid32
11317     case TARGET_NR_geteuid32:
11318         return get_errno(geteuid());
11319 #endif
11320 #ifdef TARGET_NR_getegid32
11321     case TARGET_NR_getegid32:
11322         return get_errno(getegid());
11323 #endif
11324 #ifdef TARGET_NR_setreuid32
11325     case TARGET_NR_setreuid32:
11326         return get_errno(setreuid(arg1, arg2));
11327 #endif
11328 #ifdef TARGET_NR_setregid32
11329     case TARGET_NR_setregid32:
11330         return get_errno(setregid(arg1, arg2));
11331 #endif
11332 #ifdef TARGET_NR_getgroups32
11333     case TARGET_NR_getgroups32:
11334         {
11335             int gidsetsize = arg1;
11336             uint32_t *target_grouplist;
11337             gid_t *grouplist;
11338             int i;
11339 
11340             grouplist = alloca(gidsetsize * sizeof(gid_t));
11341             ret = get_errno(getgroups(gidsetsize, grouplist));
11342             if (gidsetsize == 0)
11343                 return ret;
11344             if (!is_error(ret)) {
11345                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11346                 if (!target_grouplist) {
11347                     return -TARGET_EFAULT;
11348                 }
11349                 for (i = 0; i < ret; i++)
11350                     target_grouplist[i] = tswap32(grouplist[i]);
11351                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11352             }
11353         }
11354         return ret;
11355 #endif
11356 #ifdef TARGET_NR_setgroups32
11357     case TARGET_NR_setgroups32:
11358         {
11359             int gidsetsize = arg1;
11360             uint32_t *target_grouplist;
11361             gid_t *grouplist;
11362             int i;
11363 
11364             grouplist = alloca(gidsetsize * sizeof(gid_t));
11365             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11366             if (!target_grouplist) {
11367                 return -TARGET_EFAULT;
11368             }
11369             for (i = 0; i < gidsetsize; i++)
11370                 grouplist[i] = tswap32(target_grouplist[i]);
11371             unlock_user(target_grouplist, arg2, 0);
11372             return get_errno(setgroups(gidsetsize, grouplist));
11373         }
11374 #endif
11375 #ifdef TARGET_NR_fchown32
11376     case TARGET_NR_fchown32:
11377         return get_errno(fchown(arg1, arg2, arg3));
11378 #endif
11379 #ifdef TARGET_NR_setresuid32
11380     case TARGET_NR_setresuid32:
11381         return get_errno(sys_setresuid(arg1, arg2, arg3));
11382 #endif
11383 #ifdef TARGET_NR_getresuid32
11384     case TARGET_NR_getresuid32:
11385         {
11386             uid_t ruid, euid, suid;
11387             ret = get_errno(getresuid(&ruid, &euid, &suid));
11388             if (!is_error(ret)) {
11389                 if (put_user_u32(ruid, arg1)
11390                     || put_user_u32(euid, arg2)
11391                     || put_user_u32(suid, arg3))
11392                     return -TARGET_EFAULT;
11393             }
11394         }
11395         return ret;
11396 #endif
11397 #ifdef TARGET_NR_setresgid32
11398     case TARGET_NR_setresgid32:
11399         return get_errno(sys_setresgid(arg1, arg2, arg3));
11400 #endif
11401 #ifdef TARGET_NR_getresgid32
11402     case TARGET_NR_getresgid32:
11403         {
11404             gid_t rgid, egid, sgid;
11405             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11406             if (!is_error(ret)) {
11407                 if (put_user_u32(rgid, arg1)
11408                     || put_user_u32(egid, arg2)
11409                     || put_user_u32(sgid, arg3))
11410                     return -TARGET_EFAULT;
11411             }
11412         }
11413         return ret;
11414 #endif
11415 #ifdef TARGET_NR_chown32
11416     case TARGET_NR_chown32:
11417         if (!(p = lock_user_string(arg1)))
11418             return -TARGET_EFAULT;
11419         ret = get_errno(chown(p, arg2, arg3));
11420         unlock_user(p, arg1, 0);
11421         return ret;
11422 #endif
11423 #ifdef TARGET_NR_setuid32
11424     case TARGET_NR_setuid32:
11425         return get_errno(sys_setuid(arg1));
11426 #endif
11427 #ifdef TARGET_NR_setgid32
11428     case TARGET_NR_setgid32:
11429         return get_errno(sys_setgid(arg1));
11430 #endif
11431 #ifdef TARGET_NR_setfsuid32
11432     case TARGET_NR_setfsuid32:
11433         return get_errno(setfsuid(arg1));
11434 #endif
11435 #ifdef TARGET_NR_setfsgid32
11436     case TARGET_NR_setfsgid32:
11437         return get_errno(setfsgid(arg1));
11438 #endif
11439 #ifdef TARGET_NR_mincore
11440     case TARGET_NR_mincore:
11441         {
11442             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11443             if (!a) {
11444                 return -TARGET_ENOMEM;
11445             }
11446             p = lock_user_string(arg3);
11447             if (!p) {
11448                 ret = -TARGET_EFAULT;
11449             } else {
11450                 ret = get_errno(mincore(a, arg2, p));
11451                 unlock_user(p, arg3, ret);
11452             }
11453             unlock_user(a, arg1, 0);
11454         }
11455         return ret;
11456 #endif
11457 #ifdef TARGET_NR_arm_fadvise64_64
11458     case TARGET_NR_arm_fadvise64_64:
11459         /* arm_fadvise64_64 looks like fadvise64_64 but
11460          * with different argument order: fd, advice, offset, len
11461          * rather than the usual fd, offset, len, advice.
11462          * Note that offset and len are both 64-bit so appear as
11463          * pairs of 32-bit registers.
11464          */
11465         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11466                             target_offset64(arg5, arg6), arg2);
11467         return -host_to_target_errno(ret);
11468 #endif
11469 
11470 #if TARGET_ABI_BITS == 32
11471 
11472 #ifdef TARGET_NR_fadvise64_64
11473     case TARGET_NR_fadvise64_64:
11474 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11475         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11476         ret = arg2;
11477         arg2 = arg3;
11478         arg3 = arg4;
11479         arg4 = arg5;
11480         arg5 = arg6;
11481         arg6 = ret;
11482 #else
11483         /* 6 args: fd, offset (high, low), len (high, low), advice */
11484         if (regpairs_aligned(cpu_env, num)) {
11485             /* offset is in (3,4), len in (5,6) and advice in 7 */
11486             arg2 = arg3;
11487             arg3 = arg4;
11488             arg4 = arg5;
11489             arg5 = arg6;
11490             arg6 = arg7;
11491         }
11492 #endif
11493         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11494                             target_offset64(arg4, arg5), arg6);
11495         return -host_to_target_errno(ret);
11496 #endif
11497 
11498 #ifdef TARGET_NR_fadvise64
11499     case TARGET_NR_fadvise64:
11500         /* 5 args: fd, offset (high, low), len, advice */
11501         if (regpairs_aligned(cpu_env, num)) {
11502             /* offset is in (3,4), len in 5 and advice in 6 */
11503             arg2 = arg3;
11504             arg3 = arg4;
11505             arg4 = arg5;
11506             arg5 = arg6;
11507         }
11508         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11509         return -host_to_target_errno(ret);
11510 #endif
11511 
11512 #else /* not a 32-bit ABI */
11513 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11514 #ifdef TARGET_NR_fadvise64_64
11515     case TARGET_NR_fadvise64_64:
11516 #endif
11517 #ifdef TARGET_NR_fadvise64
11518     case TARGET_NR_fadvise64:
11519 #endif
11520 #ifdef TARGET_S390X
11521         switch (arg4) {
11522         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11523         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11524         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11525         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11526         default: break;
11527         }
11528 #endif
11529         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11530 #endif
11531 #endif /* end of 64-bit ABI fadvise handling */
11532 
11533 #ifdef TARGET_NR_madvise
11534     case TARGET_NR_madvise:
11535         /* A straight passthrough may not be safe because qemu sometimes
11536            turns private file-backed mappings into anonymous mappings.
11537            This will break MADV_DONTNEED.
11538            This is a hint, so ignoring and returning success is ok.  */
11539         return 0;
11540 #endif
11541 #ifdef TARGET_NR_fcntl64
11542     case TARGET_NR_fcntl64:
11543     {
11544         int cmd;
11545         struct flock64 fl;
11546         from_flock64_fn *copyfrom = copy_from_user_flock64;
11547         to_flock64_fn *copyto = copy_to_user_flock64;
11548 
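              /*
               * The old ARM OABI lays out struct flock64 without the padding that
               * EABI's 64-bit alignment inserts, so it needs its own copy helpers.
               */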
11549 #ifdef TARGET_ARM
11550         if (!((CPUARMState *)cpu_env)->eabi) {
11551             copyfrom = copy_from_user_oabi_flock64;
11552             copyto = copy_to_user_oabi_flock64;
11553         }
11554 #endif
11555 
11556         cmd = target_to_host_fcntl_cmd(arg2);
11557         if (cmd == -TARGET_EINVAL) {
11558             return cmd;
11559         }
11560 
11561         switch (arg2) {
11562         case TARGET_F_GETLK64:
11563             ret = copyfrom(&fl, arg3);
11564             if (ret) {
11565                 break;
11566             }
11567             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11568             if (ret == 0) {
11569                 ret = copyto(arg3, &fl);
11570             }
11571             break;
11572 
11573         case TARGET_F_SETLK64:
11574         case TARGET_F_SETLKW64:
11575             ret = copyfrom(&fl, arg3);
11576             if (ret) {
11577                 break;
11578             }
11579             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11580             break;
11581         default:
11582             ret = do_fcntl(arg1, arg2, arg3);
11583             break;
11584         }
11585         return ret;
11586     }
11587 #endif
11588 #ifdef TARGET_NR_cacheflush
11589     case TARGET_NR_cacheflush:
11590         /* self-modifying code is handled automatically, so nothing needed */
11591         return 0;
11592 #endif
11593 #ifdef TARGET_NR_getpagesize
11594     case TARGET_NR_getpagesize:
11595         return TARGET_PAGE_SIZE;
11596 #endif
11597     case TARGET_NR_gettid:
11598         return get_errno(sys_gettid());
11599 #ifdef TARGET_NR_readahead
11600     case TARGET_NR_readahead:
11601 #if TARGET_ABI_BITS == 32
11602         if (regpairs_aligned(cpu_env, num)) {
11603             arg2 = arg3;
11604             arg3 = arg4;
11605             arg4 = arg5;
11606         }
11607         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11608 #else
11609         ret = get_errno(readahead(arg1, arg2, arg3));
11610 #endif
11611         return ret;
11612 #endif
11613 #ifdef CONFIG_ATTR
11614 #ifdef TARGET_NR_setxattr
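          /*
           * The xattr syscalls all follow the same pattern: lock the guest name
           * and value buffers, call the corresponding host syscall, then unlock
           * (copying the value buffer back for the "get"/"list" variants).
           */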
11615     case TARGET_NR_listxattr:
11616     case TARGET_NR_llistxattr:
11617     {
11618         void *p, *b = 0;
11619         if (arg2) {
11620             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11621             if (!b) {
11622                 return -TARGET_EFAULT;
11623             }
11624         }
11625         p = lock_user_string(arg1);
11626         if (p) {
11627             if (num == TARGET_NR_listxattr) {
11628                 ret = get_errno(listxattr(p, b, arg3));
11629             } else {
11630                 ret = get_errno(llistxattr(p, b, arg3));
11631             }
11632         } else {
11633             ret = -TARGET_EFAULT;
11634         }
11635         unlock_user(p, arg1, 0);
11636         unlock_user(b, arg2, arg3);
11637         return ret;
11638     }
11639     case TARGET_NR_flistxattr:
11640     {
11641         void *b = 0;
11642         if (arg2) {
11643             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11644             if (!b) {
11645                 return -TARGET_EFAULT;
11646             }
11647         }
11648         ret = get_errno(flistxattr(arg1, b, arg3));
11649         unlock_user(b, arg2, arg3);
11650         return ret;
11651     }
11652     case TARGET_NR_setxattr:
11653     case TARGET_NR_lsetxattr:
11654         {
11655             void *p, *n, *v = 0;
11656             if (arg3) {
11657                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11658                 if (!v) {
11659                     return -TARGET_EFAULT;
11660                 }
11661             }
11662             p = lock_user_string(arg1);
11663             n = lock_user_string(arg2);
11664             if (p && n) {
11665                 if (num == TARGET_NR_setxattr) {
11666                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11667                 } else {
11668                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11669                 }
11670             } else {
11671                 ret = -TARGET_EFAULT;
11672             }
11673             unlock_user(p, arg1, 0);
11674             unlock_user(n, arg2, 0);
11675             unlock_user(v, arg3, 0);
11676         }
11677         return ret;
11678     case TARGET_NR_fsetxattr:
11679         {
11680             void *n, *v = 0;
11681             if (arg3) {
11682                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11683                 if (!v) {
11684                     return -TARGET_EFAULT;
11685                 }
11686             }
11687             n = lock_user_string(arg2);
11688             if (n) {
11689                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11690             } else {
11691                 ret = -TARGET_EFAULT;
11692             }
11693             unlock_user(n, arg2, 0);
11694             unlock_user(v, arg3, 0);
11695         }
11696         return ret;
11697     case TARGET_NR_getxattr:
11698     case TARGET_NR_lgetxattr:
11699         {
11700             void *p, *n, *v = 0;
11701             if (arg3) {
11702                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11703                 if (!v) {
11704                     return -TARGET_EFAULT;
11705                 }
11706             }
11707             p = lock_user_string(arg1);
11708             n = lock_user_string(arg2);
11709             if (p && n) {
11710                 if (num == TARGET_NR_getxattr) {
11711                     ret = get_errno(getxattr(p, n, v, arg4));
11712                 } else {
11713                     ret = get_errno(lgetxattr(p, n, v, arg4));
11714                 }
11715             } else {
11716                 ret = -TARGET_EFAULT;
11717             }
11718             unlock_user(p, arg1, 0);
11719             unlock_user(n, arg2, 0);
11720             unlock_user(v, arg3, arg4);
11721         }
11722         return ret;
11723     case TARGET_NR_fgetxattr:
11724         {
11725             void *n, *v = 0;
11726             if (arg3) {
11727                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11728                 if (!v) {
11729                     return -TARGET_EFAULT;
11730                 }
11731             }
11732             n = lock_user_string(arg2);
11733             if (n) {
11734                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11735             } else {
11736                 ret = -TARGET_EFAULT;
11737             }
11738             unlock_user(n, arg2, 0);
11739             unlock_user(v, arg3, arg4);
11740         }
11741         return ret;
11742     case TARGET_NR_removexattr:
11743     case TARGET_NR_lremovexattr:
11744         {
11745             void *p, *n;
11746             p = lock_user_string(arg1);
11747             n = lock_user_string(arg2);
11748             if (p && n) {
11749                 if (num == TARGET_NR_removexattr) {
11750                     ret = get_errno(removexattr(p, n));
11751                 } else {
11752                     ret = get_errno(lremovexattr(p, n));
11753                 }
11754             } else {
11755                 ret = -TARGET_EFAULT;
11756             }
11757             unlock_user(p, arg1, 0);
11758             unlock_user(n, arg2, 0);
11759         }
11760         return ret;
11761     case TARGET_NR_fremovexattr:
11762         {
11763             void *n;
11764             n = lock_user_string(arg2);
11765             if (n) {
11766                 ret = get_errno(fremovexattr(arg1, n));
11767             } else {
11768                 ret = -TARGET_EFAULT;
11769             }
11770             unlock_user(n, arg2, 0);
11771         }
11772         return ret;
11773 #endif
11774 #endif /* CONFIG_ATTR */
11775 #ifdef TARGET_NR_set_thread_area
11776     case TARGET_NR_set_thread_area:
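          /*
           * Each target keeps the TLS pointer in its own place: the CP0 UserLocal
           * register on MIPS, PR_PID on CRIS, a GDT entry on 32-bit x86 and the
           * TaskState on m68k; everything else gets ENOSYS.
           */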
11777 #if defined(TARGET_MIPS)
11778       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11779       return 0;
11780 #elif defined(TARGET_CRIS)
11781       if (arg1 & 0xff)
11782           ret = -TARGET_EINVAL;
11783       else {
11784           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11785           ret = 0;
11786       }
11787       return ret;
11788 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11789       return do_set_thread_area(cpu_env, arg1);
11790 #elif defined(TARGET_M68K)
11791       {
11792           TaskState *ts = cpu->opaque;
11793           ts->tp_value = arg1;
11794           return 0;
11795       }
11796 #else
11797       return -TARGET_ENOSYS;
11798 #endif
11799 #endif
11800 #ifdef TARGET_NR_get_thread_area
11801     case TARGET_NR_get_thread_area:
11802 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11803         return do_get_thread_area(cpu_env, arg1);
11804 #elif defined(TARGET_M68K)
11805         {
11806             TaskState *ts = cpu->opaque;
11807             return ts->tp_value;
11808         }
11809 #else
11810         return -TARGET_ENOSYS;
11811 #endif
11812 #endif
11813 #ifdef TARGET_NR_getdomainname
11814     case TARGET_NR_getdomainname:
11815         return -TARGET_ENOSYS;
11816 #endif
11817 
11818 #ifdef TARGET_NR_clock_settime
11819     case TARGET_NR_clock_settime:
11820     {
11821         struct timespec ts;
11822 
11823         ret = target_to_host_timespec(&ts, arg2);
11824         if (!is_error(ret)) {
11825             ret = get_errno(clock_settime(arg1, &ts));
11826         }
11827         return ret;
11828     }
11829 #endif
11830 #ifdef TARGET_NR_clock_settime64
11831     case TARGET_NR_clock_settime64:
11832     {
11833         struct timespec ts;
11834 
11835         ret = target_to_host_timespec64(&ts, arg2);
11836         if (!is_error(ret)) {
11837             ret = get_errno(clock_settime(arg1, &ts));
11838         }
11839         return ret;
11840     }
11841 #endif
11842 #ifdef TARGET_NR_clock_gettime
11843     case TARGET_NR_clock_gettime:
11844     {
11845         struct timespec ts;
11846         ret = get_errno(clock_gettime(arg1, &ts));
11847         if (!is_error(ret)) {
11848             ret = host_to_target_timespec(arg2, &ts);
11849         }
11850         return ret;
11851     }
11852 #endif
11853 #ifdef TARGET_NR_clock_gettime64
11854     case TARGET_NR_clock_gettime64:
11855     {
11856         struct timespec ts;
11857         ret = get_errno(clock_gettime(arg1, &ts));
11858         if (!is_error(ret)) {
11859             ret = host_to_target_timespec64(arg2, &ts);
11860         }
11861         return ret;
11862     }
11863 #endif
11864 #ifdef TARGET_NR_clock_getres
11865     case TARGET_NR_clock_getres:
11866     {
11867         struct timespec ts;
11868         ret = get_errno(clock_getres(arg1, &ts));
11869         if (!is_error(ret)) {
11870             host_to_target_timespec(arg2, &ts);
11871         }
11872         return ret;
11873     }
11874 #endif
11875 #ifdef TARGET_NR_clock_getres_time64
11876     case TARGET_NR_clock_getres_time64:
11877     {
11878         struct timespec ts;
11879         ret = get_errno(clock_getres(arg1, &ts));
11880         if (!is_error(ret)) {
11881             host_to_target_timespec64(arg2, &ts);
11882         }
11883         return ret;
11884     }
11885 #endif
11886 #ifdef TARGET_NR_clock_nanosleep
11887     case TARGET_NR_clock_nanosleep:
11888     {
11889         struct timespec ts;
11890         if (target_to_host_timespec(&ts, arg3)) {
11891             return -TARGET_EFAULT;
11892         }
11893         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11894                                              &ts, arg4 ? &ts : NULL));
11895         /*
11896          * If the call is interrupted by a signal handler it fails with
11897          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
11898          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
11899          */
11900         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
11901             host_to_target_timespec(arg4, &ts)) {
11902               return -TARGET_EFAULT;
11903         }
11904 
11905         return ret;
11906     }
11907 #endif
11908 
11909 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11910     case TARGET_NR_set_tid_address:
11911         return get_errno(set_tid_address((int *)g2h(arg1)));
11912 #endif
11913 
11914     case TARGET_NR_tkill:
11915         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11916 
11917     case TARGET_NR_tgkill:
11918         return get_errno(safe_tgkill((int)arg1, (int)arg2,
11919                          target_to_host_signal(arg3)));
11920 
11921 #ifdef TARGET_NR_set_robust_list
11922     case TARGET_NR_set_robust_list:
11923     case TARGET_NR_get_robust_list:
11924         /* The ABI for supporting robust futexes has userspace pass
11925          * the kernel a pointer to a linked list which is updated by
11926          * userspace after the syscall; the list is walked by the kernel
11927          * when the thread exits. Since the linked list in QEMU guest
11928          * memory isn't a valid linked list for the host and we have
11929          * no way to reliably intercept the thread-death event, we can't
11930          * support these. Silently return ENOSYS so that guest userspace
11931          * falls back to a non-robust futex implementation (which should
11932          * be OK except in the corner case of the guest crashing while
11933          * holding a mutex that is shared with another process via
11934          * shared memory).
11935          */
11936         return -TARGET_ENOSYS;
11937 #endif
11938 
11939 #if defined(TARGET_NR_utimensat)
11940     case TARGET_NR_utimensat:
11941         {
11942             struct timespec *tsp, ts[2];
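                  /* A NULL times pointer asks the kernel to set both timestamps to the current time. */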
11943             if (!arg3) {
11944                 tsp = NULL;
11945             } else {
11946                 if (target_to_host_timespec(ts, arg3)) {
11947                     return -TARGET_EFAULT;
11948                 }
11949                 if (target_to_host_timespec(ts + 1, arg3 +
11950                                             sizeof(struct target_timespec))) {
11951                     return -TARGET_EFAULT;
11952                 }
11953                 tsp = ts;
11954             }
11955             if (!arg2)
11956                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11957             else {
11958                 if (!(p = lock_user_string(arg2))) {
11959                     return -TARGET_EFAULT;
11960                 }
11961                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11962                 unlock_user(p, arg2, 0);
11963             }
11964         }
11965         return ret;
11966 #endif
11967 #ifdef TARGET_NR_futex
11968     case TARGET_NR_futex:
11969         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11970 #endif
11971 #ifdef TARGET_NR_futex_time64
11972     case TARGET_NR_futex_time64:
11973         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11974 #endif
11975 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11976     case TARGET_NR_inotify_init:
11977         ret = get_errno(sys_inotify_init());
11978         if (ret >= 0) {
11979             fd_trans_register(ret, &target_inotify_trans);
11980         }
11981         return ret;
11982 #endif
11983 #ifdef CONFIG_INOTIFY1
11984 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11985     case TARGET_NR_inotify_init1:
11986         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11987                                           fcntl_flags_tbl)));
11988         if (ret >= 0) {
11989             fd_trans_register(ret, &target_inotify_trans);
11990         }
11991         return ret;
11992 #endif
11993 #endif
11994 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11995     case TARGET_NR_inotify_add_watch:
11996         p = lock_user_string(arg2);
11997         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11998         unlock_user(p, arg2, 0);
11999         return ret;
12000 #endif
12001 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12002     case TARGET_NR_inotify_rm_watch:
12003         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12004 #endif
12005 
12006 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12007     case TARGET_NR_mq_open:
12008         {
12009             struct mq_attr posix_mq_attr;
12010             struct mq_attr *pposix_mq_attr;
12011             int host_flags;
12012 
12013             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12014             pposix_mq_attr = NULL;
12015             if (arg4) {
12016                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12017                     return -TARGET_EFAULT;
12018                 }
12019                 pposix_mq_attr = &posix_mq_attr;
12020             }
12021             p = lock_user_string(arg1 - 1);
12022             if (!p) {
12023                 return -TARGET_EFAULT;
12024             }
12025             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12026             unlock_user (p, arg1, 0);
12027         }
12028         return ret;
12029 
12030     case TARGET_NR_mq_unlink:
12031         p = lock_user_string(arg1 - 1);
12032         if (!p) {
12033             return -TARGET_EFAULT;
12034         }
12035         ret = get_errno(mq_unlink(p));
12036         unlock_user (p, arg1, 0);
12037         return ret;
12038 
12039 #ifdef TARGET_NR_mq_timedsend
12040     case TARGET_NR_mq_timedsend:
12041         {
12042             struct timespec ts;
12043 
12044             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12045             if (arg5 != 0) {
12046                 if (target_to_host_timespec(&ts, arg5)) {
12047                     return -TARGET_EFAULT;
12048                 }
12049                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12050                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12051                     return -TARGET_EFAULT;
12052                 }
12053             } else {
12054                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12055             }
12056             unlock_user(p, arg2, arg3);
12057         }
12058         return ret;
12059 #endif
12060 
12061 #ifdef TARGET_NR_mq_timedreceive
12062     case TARGET_NR_mq_timedreceive:
12063         {
12064             struct timespec ts;
12065             unsigned int prio;
12066 
12067             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12068             if (arg5 != 0) {
12069                 if (target_to_host_timespec(&ts, arg5)) {
12070                     return -TARGET_EFAULT;
12071                 }
12072                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12073                                                      &prio, &ts));
12074                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12075                     return -TARGET_EFAULT;
12076                 }
12077             } else {
12078                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12079                                                      &prio, NULL));
12080             }
12081             unlock_user(p, arg2, arg3);
12082             if (arg4 != 0)
12083                 put_user_u32(prio, arg4);
12084         }
12085         return ret;
12086 #endif
12087 
12088     /* Not implemented for now... */
12089 /*     case TARGET_NR_mq_notify: */
12090 /*         break; */
12091 
12092     case TARGET_NR_mq_getsetattr:
12093         {
12094             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12095             ret = 0;
12096             if (arg2 != 0) {
12097                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12098                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12099                                            &posix_mq_attr_out));
12100             } else if (arg3 != 0) {
12101                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12102             }
12103             if (ret == 0 && arg3 != 0) {
12104                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12105             }
12106         }
12107         return ret;
12108 #endif
12109 
12110 #ifdef CONFIG_SPLICE
12111 #ifdef TARGET_NR_tee
12112     case TARGET_NR_tee:
12113         {
12114             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12115         }
12116         return ret;
12117 #endif
12118 #ifdef TARGET_NR_splice
12119     case TARGET_NR_splice:
12120         {
12121             loff_t loff_in, loff_out;
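            /*
             * The optional in/out offsets live in guest memory: copy them
             * into host loff_t variables for the call and write any updated
             * values back afterwards, mirroring the kernel's handling of the
             * off_in and off_out pointers.
             */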
12122             loff_t *ploff_in = NULL, *ploff_out = NULL;
12123             if (arg2) {
12124                 if (get_user_u64(loff_in, arg2)) {
12125                     return -TARGET_EFAULT;
12126                 }
12127                 ploff_in = &loff_in;
12128             }
12129             if (arg4) {
12130                 if (get_user_u64(loff_out, arg4)) {
12131                     return -TARGET_EFAULT;
12132                 }
12133                 ploff_out = &loff_out;
12134             }
12135             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12136             if (arg2) {
12137                 if (put_user_u64(loff_in, arg2)) {
12138                     return -TARGET_EFAULT;
12139                 }
12140             }
12141             if (arg4) {
12142                 if (put_user_u64(loff_out, arg4)) {
12143                     return -TARGET_EFAULT;
12144                 }
12145             }
12146         }
12147         return ret;
12148 #endif
12149 #ifdef TARGET_NR_vmsplice
12150     case TARGET_NR_vmsplice:
12151         {
12152             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12153             if (vec != NULL) {
12154                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12155                 unlock_iovec(vec, arg2, arg3, 0);
12156             } else {
12157                 ret = -host_to_target_errno(errno);
12158             }
12159         }
12160         return ret;
12161 #endif
12162 #endif /* CONFIG_SPLICE */
12163 #ifdef CONFIG_EVENTFD
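    /*
     * eventfd: register an fd translator so that the 8-byte counter values
     * the guest reads from or writes to the descriptor are byte-swapped to
     * and from the target's endianness.
     */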
12164 #if defined(TARGET_NR_eventfd)
12165     case TARGET_NR_eventfd:
12166         ret = get_errno(eventfd(arg1, 0));
12167         if (ret >= 0) {
12168             fd_trans_register(ret, &target_eventfd_trans);
12169         }
12170         return ret;
12171 #endif
12172 #if defined(TARGET_NR_eventfd2)
12173     case TARGET_NR_eventfd2:
12174     {
12175         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12176         if (arg2 & TARGET_O_NONBLOCK) {
12177             host_flags |= O_NONBLOCK;
12178         }
12179         if (arg2 & TARGET_O_CLOEXEC) {
12180             host_flags |= O_CLOEXEC;
12181         }
12182         ret = get_errno(eventfd(arg1, host_flags));
12183         if (ret >= 0) {
12184             fd_trans_register(ret, &target_eventfd_trans);
12185         }
12186         return ret;
12187     }
12188 #endif
12189 #endif /* CONFIG_EVENTFD  */
12190 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12191     case TARGET_NR_fallocate:
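        /*
         * On 32-bit ABIs the 64-bit offset and length arrive split across
         * two registers each and are reassembled with target_offset64()
         * before calling the host fallocate().
         */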
12192 #if TARGET_ABI_BITS == 32
12193         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12194                                   target_offset64(arg5, arg6)));
12195 #else
12196         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12197 #endif
12198         return ret;
12199 #endif
12200 #if defined(CONFIG_SYNC_FILE_RANGE)
12201 #if defined(TARGET_NR_sync_file_range)
12202     case TARGET_NR_sync_file_range:
12203 #if TARGET_ABI_BITS == 32
12204 #if defined(TARGET_MIPS)
12205         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12206                                         target_offset64(arg5, arg6), arg7));
12207 #else
12208         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12209                                         target_offset64(arg4, arg5), arg6));
12210 #endif /* !TARGET_MIPS */
12211 #else
12212         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12213 #endif
12214         return ret;
12215 #endif
12216 #if defined(TARGET_NR_sync_file_range2) || \
12217     defined(TARGET_NR_arm_sync_file_range)
12218 #if defined(TARGET_NR_sync_file_range2)
12219     case TARGET_NR_sync_file_range2:
12220 #endif
12221 #if defined(TARGET_NR_arm_sync_file_range)
12222     case TARGET_NR_arm_sync_file_range:
12223 #endif
12224         /* This is like sync_file_range but the arguments are reordered */
12225 #if TARGET_ABI_BITS == 32
12226         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12227                                         target_offset64(arg5, arg6), arg2));
12228 #else
12229         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12230 #endif
12231         return ret;
12232 #endif
12233 #endif
12234 #if defined(TARGET_NR_signalfd4)
12235     case TARGET_NR_signalfd4:
12236         return do_signalfd4(arg1, arg2, arg4);
12237 #endif
12238 #if defined(TARGET_NR_signalfd)
12239     case TARGET_NR_signalfd:
12240         return do_signalfd4(arg1, arg2, 0);
12241 #endif
12242 #if defined(CONFIG_EPOLL)
12243 #if defined(TARGET_NR_epoll_create)
12244     case TARGET_NR_epoll_create:
12245         return get_errno(epoll_create(arg1));
12246 #endif
12247 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12248     case TARGET_NR_epoll_create1:
12249         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12250 #endif
12251 #if defined(TARGET_NR_epoll_ctl)
12252     case TARGET_NR_epoll_ctl:
12253     {
12254         struct epoll_event ep;
12255         struct epoll_event *epp = 0;
12256         if (arg4) {
12257             struct target_epoll_event *target_ep;
12258             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12259                 return -TARGET_EFAULT;
12260             }
12261             ep.events = tswap32(target_ep->events);
12262             /* The epoll_data_t union is just opaque data to the kernel,
12263              * so we transfer all 64 bits across and need not worry what
12264              * actual data type it is.
12265              */
12266             ep.data.u64 = tswap64(target_ep->data.u64);
12267             unlock_user_struct(target_ep, arg4, 0);
12268             epp = &ep;
12269         }
12270         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12271     }
12272 #endif
12273 
12274 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12275 #if defined(TARGET_NR_epoll_wait)
12276     case TARGET_NR_epoll_wait:
12277 #endif
12278 #if defined(TARGET_NR_epoll_pwait)
12279     case TARGET_NR_epoll_pwait:
12280 #endif
12281     {
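        /*
         * Events are gathered into a host-side epoll_event buffer and then
         * copied out, with 32/64-bit byte swaps, into the guest's
         * target_epoll_event array once the wait completes.
         */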
12282         struct target_epoll_event *target_ep;
12283         struct epoll_event *ep;
12284         int epfd = arg1;
12285         int maxevents = arg3;
12286         int timeout = arg4;
12287 
12288         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12289             return -TARGET_EINVAL;
12290         }
12291 
12292         target_ep = lock_user(VERIFY_WRITE, arg2,
12293                               maxevents * sizeof(struct target_epoll_event), 1);
12294         if (!target_ep) {
12295             return -TARGET_EFAULT;
12296         }
12297 
12298         ep = g_try_new(struct epoll_event, maxevents);
12299         if (!ep) {
12300             unlock_user(target_ep, arg2, 0);
12301             return -TARGET_ENOMEM;
12302         }
12303 
12304         switch (num) {
12305 #if defined(TARGET_NR_epoll_pwait)
12306         case TARGET_NR_epoll_pwait:
12307         {
12308             target_sigset_t *target_set;
12309             sigset_t _set, *set = &_set;
12310 
12311             if (arg5) {
12312                 if (arg6 != sizeof(target_sigset_t)) {
12313                     ret = -TARGET_EINVAL;
12314                     break;
12315                 }
12316 
12317                 target_set = lock_user(VERIFY_READ, arg5,
12318                                        sizeof(target_sigset_t), 1);
12319                 if (!target_set) {
12320                     ret = -TARGET_EFAULT;
12321                     break;
12322                 }
12323                 target_to_host_sigset(set, target_set);
12324                 unlock_user(target_set, arg5, 0);
12325             } else {
12326                 set = NULL;
12327             }
12328 
12329             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12330                                              set, SIGSET_T_SIZE));
12331             break;
12332         }
12333 #endif
12334 #if defined(TARGET_NR_epoll_wait)
12335         case TARGET_NR_epoll_wait:
12336             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12337                                              NULL, 0));
12338             break;
12339 #endif
12340         default:
12341             ret = -TARGET_ENOSYS;
12342         }
12343         if (!is_error(ret)) {
12344             int i;
12345             for (i = 0; i < ret; i++) {
12346                 target_ep[i].events = tswap32(ep[i].events);
12347                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12348             }
12349             unlock_user(target_ep, arg2,
12350                         ret * sizeof(struct target_epoll_event));
12351         } else {
12352             unlock_user(target_ep, arg2, 0);
12353         }
12354         g_free(ep);
12355         return ret;
12356     }
12357 #endif
12358 #endif
12359 #ifdef TARGET_NR_prlimit64
12360     case TARGET_NR_prlimit64:
12361     {
12362         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12363         struct target_rlimit64 *target_rnew, *target_rold;
12364         struct host_rlimit64 rnew, rold, *rnewp = 0;
12365         int resource = target_to_host_resource(arg2);
12366 
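        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
         * forwarded to the host (rnewp stays NULL for them), presumably so
         * the guest cannot shrink resources QEMU itself depends on; the old
         * limits are still read back below.
         */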
12367         if (arg3 && (resource != RLIMIT_AS &&
12368                      resource != RLIMIT_DATA &&
12369                      resource != RLIMIT_STACK)) {
12370             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12371                 return -TARGET_EFAULT;
12372             }
12373             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12374             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12375             unlock_user_struct(target_rnew, arg3, 0);
12376             rnewp = &rnew;
12377         }
12378 
12379         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12380         if (!is_error(ret) && arg4) {
12381             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12382                 return -TARGET_EFAULT;
12383             }
12384             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12385             target_rold->rlim_max = tswap64(rold.rlim_max);
12386             unlock_user_struct(target_rold, arg4, 1);
12387         }
12388         return ret;
12389     }
12390 #endif
12391 #ifdef TARGET_NR_gethostname
12392     case TARGET_NR_gethostname:
12393     {
12394         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12395         if (name) {
12396             ret = get_errno(gethostname(name, arg2));
12397             unlock_user(name, arg1, arg2);
12398         } else {
12399             ret = -TARGET_EFAULT;
12400         }
12401         return ret;
12402     }
12403 #endif
12404 #ifdef TARGET_NR_atomic_cmpxchg_32
12405     case TARGET_NR_atomic_cmpxchg_32:
12406     {
12407         /* should use start_exclusive from main.c */
12408         abi_ulong mem_value;
12409         if (get_user_u32(mem_value, arg6)) {
12410             target_siginfo_t info;
12411             info.si_signo = SIGSEGV;
12412             info.si_errno = 0;
12413             info.si_code = TARGET_SEGV_MAPERR;
12414             info._sifields._sigfault._addr = arg6;
12415             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12416                          QEMU_SI_FAULT, &info);
12417             ret = 0xdeadbeef;
12418 
12419         }
12420         if (mem_value == arg2)
12421             put_user_u32(arg1, arg6);
12422         return mem_value;
12423     }
12424 #endif
12425 #ifdef TARGET_NR_atomic_barrier
12426     case TARGET_NR_atomic_barrier:
12427         /* Like the kernel implementation and the QEMU Arm barrier,
12428            treat this as a no-op. */
12429         return 0;
12430 #endif
12431 
12432 #ifdef TARGET_NR_timer_create
12433     case TARGET_NR_timer_create:
12434     {
12435         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12436 
12437         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12438 
12439         int clkid = arg1;
12440         int timer_index = next_free_host_timer();
12441 
12442         if (timer_index < 0) {
12443             ret = -TARGET_EAGAIN;
12444         } else {
12445             timer_t *phtimer = g_posix_timers + timer_index;
12446 
12447             if (arg2) {
12448                 phost_sevp = &host_sevp;
12449                 ret = target_to_host_sigevent(phost_sevp, arg2);
12450                 if (ret != 0) {
12451                     return ret;
12452                 }
12453             }
12454 
12455             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12456             if (ret) {
12457                 phtimer = NULL;
12458             } else {
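                /*
                 * The timer id returned to the guest is the index into
                 * g_posix_timers tagged with TIMER_MAGIC; get_timer_id()
                 * checks and strips the tag again on later timer_* syscalls.
                 */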
12459                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12460                     return -TARGET_EFAULT;
12461                 }
12462             }
12463         }
12464         return ret;
12465     }
12466 #endif
12467 
12468 #ifdef TARGET_NR_timer_settime
12469     case TARGET_NR_timer_settime:
12470     {
12471         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12472          * struct itimerspec *old_value */
12473         target_timer_t timerid = get_timer_id(arg1);
12474 
12475         if (timerid < 0) {
12476             ret = timerid;
12477         } else if (arg3 == 0) {
12478             ret = -TARGET_EINVAL;
12479         } else {
12480             timer_t htimer = g_posix_timers[timerid];
12481             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12482 
12483             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12484                 return -TARGET_EFAULT;
12485             }
12486             ret = get_errno(
12487                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12488             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12489                 return -TARGET_EFAULT;
12490             }
12491         }
12492         return ret;
12493     }
12494 #endif
12495 
12496 #ifdef TARGET_NR_timer_settime64
12497     case TARGET_NR_timer_settime64:
12498     {
12499         target_timer_t timerid = get_timer_id(arg1);
12500 
12501         if (timerid < 0) {
12502             ret = timerid;
12503         } else if (arg3 == 0) {
12504             ret = -TARGET_EINVAL;
12505         } else {
12506             timer_t htimer = g_posix_timers[timerid];
12507             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12508 
12509             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12510                 return -TARGET_EFAULT;
12511             }
12512             ret = get_errno(
12513                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12514             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12515                 return -TARGET_EFAULT;
12516             }
12517         }
12518         return ret;
12519     }
12520 #endif
12521 
12522 #ifdef TARGET_NR_timer_gettime
12523     case TARGET_NR_timer_gettime:
12524     {
12525         /* args: timer_t timerid, struct itimerspec *curr_value */
12526         target_timer_t timerid = get_timer_id(arg1);
12527 
12528         if (timerid < 0) {
12529             ret = timerid;
12530         } else if (!arg2) {
12531             ret = -TARGET_EFAULT;
12532         } else {
12533             timer_t htimer = g_posix_timers[timerid];
12534             struct itimerspec hspec;
12535             ret = get_errno(timer_gettime(htimer, &hspec));
12536 
12537             if (host_to_target_itimerspec(arg2, &hspec)) {
12538                 ret = -TARGET_EFAULT;
12539             }
12540         }
12541         return ret;
12542     }
12543 #endif
12544 
12545 #ifdef TARGET_NR_timer_gettime64
12546     case TARGET_NR_timer_gettime64:
12547     {
12548         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12549         target_timer_t timerid = get_timer_id(arg1);
12550 
12551         if (timerid < 0) {
12552             ret = timerid;
12553         } else if (!arg2) {
12554             ret = -TARGET_EFAULT;
12555         } else {
12556             timer_t htimer = g_posix_timers[timerid];
12557             struct itimerspec hspec;
12558             ret = get_errno(timer_gettime(htimer, &hspec));
12559 
12560             if (host_to_target_itimerspec64(arg2, &hspec)) {
12561                 ret = -TARGET_EFAULT;
12562             }
12563         }
12564         return ret;
12565     }
12566 #endif
12567 
12568 #ifdef TARGET_NR_timer_getoverrun
12569     case TARGET_NR_timer_getoverrun:
12570     {
12571         /* args: timer_t timerid */
12572         target_timer_t timerid = get_timer_id(arg1);
12573 
12574         if (timerid < 0) {
12575             ret = timerid;
12576         } else {
12577             timer_t htimer = g_posix_timers[timerid];
12578             ret = get_errno(timer_getoverrun(htimer));
12579         }
12580         return ret;
12581     }
12582 #endif
12583 
12584 #ifdef TARGET_NR_timer_delete
12585     case TARGET_NR_timer_delete:
12586     {
12587         /* args: timer_t timerid */
12588         target_timer_t timerid = get_timer_id(arg1);
12589 
12590         if (timerid < 0) {
12591             ret = timerid;
12592         } else {
12593             timer_t htimer = g_posix_timers[timerid];
12594             ret = get_errno(timer_delete(htimer));
12595             g_posix_timers[timerid] = 0;
12596         }
12597         return ret;
12598     }
12599 #endif
12600 
12601 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12602     case TARGET_NR_timerfd_create:
12603         return get_errno(timerfd_create(arg1,
12604                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12605 #endif
12606 
12607 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12608     case TARGET_NR_timerfd_gettime:
12609         {
12610             struct itimerspec its_curr;
12611 
12612             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12613 
12614             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12615                 return -TARGET_EFAULT;
12616             }
12617         }
12618         return ret;
12619 #endif
12620 
12621 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12622     case TARGET_NR_timerfd_gettime64:
12623         {
12624             struct itimerspec its_curr;
12625 
12626             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12627 
12628             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12629                 return -TARGET_EFAULT;
12630             }
12631         }
12632         return ret;
12633 #endif
12634 
12635 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12636     case TARGET_NR_timerfd_settime:
12637         {
12638             struct itimerspec its_new, its_old, *p_new;
12639 
12640             if (arg3) {
12641                 if (target_to_host_itimerspec(&its_new, arg3)) {
12642                     return -TARGET_EFAULT;
12643                 }
12644                 p_new = &its_new;
12645             } else {
12646                 p_new = NULL;
12647             }
12648 
12649             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12650 
12651             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12652                 return -TARGET_EFAULT;
12653             }
12654         }
12655         return ret;
12656 #endif
12657 
12658 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12659     case TARGET_NR_timerfd_settime64:
12660         {
12661             struct itimerspec its_new, its_old, *p_new;
12662 
12663             if (arg3) {
12664                 if (target_to_host_itimerspec64(&its_new, arg3)) {
12665                     return -TARGET_EFAULT;
12666                 }
12667                 p_new = &its_new;
12668             } else {
12669                 p_new = NULL;
12670             }
12671 
12672             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12673 
12674             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12675                 return -TARGET_EFAULT;
12676             }
12677         }
12678         return ret;
12679 #endif
12680 
12681 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12682     case TARGET_NR_ioprio_get:
12683         return get_errno(ioprio_get(arg1, arg2));
12684 #endif
12685 
12686 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12687     case TARGET_NR_ioprio_set:
12688         return get_errno(ioprio_set(arg1, arg2, arg3));
12689 #endif
12690 
12691 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12692     case TARGET_NR_setns:
12693         return get_errno(setns(arg1, arg2));
12694 #endif
12695 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12696     case TARGET_NR_unshare:
12697         return get_errno(unshare(arg1));
12698 #endif
12699 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12700     case TARGET_NR_kcmp:
12701         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12702 #endif
12703 #ifdef TARGET_NR_swapcontext
12704     case TARGET_NR_swapcontext:
12705         /* PowerPC specific.  */
12706         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12707 #endif
12708 #ifdef TARGET_NR_memfd_create
12709     case TARGET_NR_memfd_create:
12710         p = lock_user_string(arg1);
12711         if (!p) {
12712             return -TARGET_EFAULT;
12713         }
12714         ret = get_errno(memfd_create(p, arg2));
12715         fd_trans_unregister(ret);
12716         unlock_user(p, arg1, 0);
12717         return ret;
12718 #endif
12719 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12720     case TARGET_NR_membarrier:
12721         return get_errno(membarrier(arg1, arg2));
12722 #endif
12723 
12724     default:
12725         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12726         return -TARGET_ENOSYS;
12727     }
12728     return ret;
12729 }
12730 
12731 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12732                     abi_long arg2, abi_long arg3, abi_long arg4,
12733                     abi_long arg5, abi_long arg6, abi_long arg7,
12734                     abi_long arg8)
12735 {
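    /*
     * Thin wrapper around do_syscall1(): it adds record/replay bookkeeping
     * and optional -strace logging around the actual syscall dispatch.
     */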
12736     CPUState *cpu = env_cpu(cpu_env);
12737     abi_long ret;
12738 
12739 #ifdef DEBUG_ERESTARTSYS
12740     /* Debug-only code for exercising the syscall-restart code paths
12741      * in the per-architecture cpu main loops: restart every syscall
12742      * the guest makes once before letting it through.
12743      */
12744     {
12745         static bool flag;
12746         flag = !flag;
12747         if (flag) {
12748             return -TARGET_ERESTARTSYS;
12749         }
12750     }
12751 #endif
12752 
12753     record_syscall_start(cpu, num, arg1,
12754                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12755 
12756     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12757         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
12758     }
12759 
12760     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12761                       arg5, arg6, arg7, arg8);
12762 
12763     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12764         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
12765                           arg3, arg4, arg5, arg6);
12766     }
12767 
12768     record_syscall_return(cpu, num, ret);
12769     return ret;
12770 }
12771