xref: /openbmc/qemu/linux-user/syscall.c (revision 45ad761c)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
259 
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_inotify_init __NR_inotify_init
276 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
277 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
278 #define __NR_sys_statx __NR_statx
279 
280 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
281 #define __NR__llseek __NR_lseek
282 #endif
283 
284 /* Newer kernel ports have llseek() instead of _llseek() */
285 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
286 #define TARGET_NR__llseek TARGET_NR_llseek
287 #endif
288 
289 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
290 #ifndef TARGET_O_NONBLOCK_MASK
291 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
292 #endif
293 
294 #define __NR_sys_gettid __NR_gettid
295 _syscall0(int, sys_gettid)
296 
297 /* For the 64-bit guest on 32-bit host case we must emulate
298  * getdents using getdents64, because otherwise the host
299  * might hand us back more dirent records than we can fit
300  * into the guest buffer after structure format conversion.
301  * Otherwise we emulate getdents with getdents if the host has it.
302  */
303 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
304 #define EMULATE_GETDENTS_WITH_GETDENTS
305 #endif
306 
307 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
308 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
309 #endif
310 #if (defined(TARGET_NR_getdents) && \
311       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
312     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
313 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
314 #endif
315 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
316 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
317           loff_t *, res, uint, wh);
318 #endif
319 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
320 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
321           siginfo_t *, uinfo)
322 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
323 #ifdef __NR_exit_group
324 _syscall1(int,exit_group,int,error_code)
325 #endif
326 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
327 _syscall1(int,set_tid_address,int *,tidptr)
328 #endif
329 #if defined(__NR_futex)
330 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
331           const struct timespec *,timeout,int *,uaddr2,int,val3)
332 #endif
333 #if defined(__NR_futex_time64)
334 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
335           const struct timespec *,timeout,int *,uaddr2,int,val3)
336 #endif
337 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
338 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
341 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
342           unsigned long *, user_mask_ptr);
343 /* sched_attr is not defined in glibc */
344 struct sched_attr {
345     uint32_t size;
346     uint32_t sched_policy;
347     uint64_t sched_flags;
348     int32_t sched_nice;
349     uint32_t sched_priority;
350     uint64_t sched_runtime;
351     uint64_t sched_deadline;
352     uint64_t sched_period;
353     uint32_t sched_util_min;
354     uint32_t sched_util_max;
355 };
356 #define __NR_sys_sched_getattr __NR_sched_getattr
357 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
358           unsigned int, size, unsigned int, flags);
359 #define __NR_sys_sched_setattr __NR_sched_setattr
360 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
361           unsigned int, flags);
362 #define __NR_sys_getcpu __NR_getcpu
363 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
364 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
365           void *, arg);
366 _syscall2(int, capget, struct __user_cap_header_struct *, header,
367           struct __user_cap_data_struct *, data);
368 _syscall2(int, capset, struct __user_cap_header_struct *, header,
369           struct __user_cap_data_struct *, data);
370 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
371 _syscall2(int, ioprio_get, int, which, int, who)
372 #endif
373 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
374 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
375 #endif
376 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
377 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
378 #endif
379 
380 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
381 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
382           unsigned long, idx1, unsigned long, idx2)
383 #endif
384 
385 /*
386  * It is assumed that struct statx is architecture independent.
387  */
388 #if defined(TARGET_NR_statx) && defined(__NR_statx)
389 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
390           unsigned int, mask, struct target_statx *, statxbuf)
391 #endif
392 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
393 _syscall2(int, membarrier, int, cmd, int, flags)
394 #endif
395 
396 static const bitmask_transtbl fcntl_flags_tbl[] = {
397   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
398   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
399   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
400   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
401   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
402   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
403   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
404   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
405   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
406   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
407   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
408   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
409   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
410 #if defined(O_DIRECT)
411   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
412 #endif
413 #if defined(O_NOATIME)
414   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
415 #endif
416 #if defined(O_CLOEXEC)
417   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
418 #endif
419 #if defined(O_PATH)
420   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
421 #endif
422 #if defined(O_TMPFILE)
423   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
424 #endif
425   /* Don't terminate the list prematurely on 64-bit host+guest.  */
426 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
427   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
428 #endif
429   { 0, 0, 0, 0 }
430 };
431 
432 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
433 
434 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
435 #if defined(__NR_utimensat)
436 #define __NR_sys_utimensat __NR_utimensat
437 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
438           const struct timespec *,tsp,int,flags)
439 #else
440 static int sys_utimensat(int dirfd, const char *pathname,
441                          const struct timespec times[2], int flags)
442 {
443     errno = ENOSYS;
444     return -1;
445 }
446 #endif
447 #endif /* TARGET_NR_utimensat */
448 
449 #ifdef TARGET_NR_renameat2
450 #if defined(__NR_renameat2)
451 #define __NR_sys_renameat2 __NR_renameat2
452 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
453           const char *, new, unsigned int, flags)
454 #else
455 static int sys_renameat2(int oldfd, const char *old,
456                          int newfd, const char *new, int flags)
457 {
458     if (flags == 0) {
459         return renameat(oldfd, old, newfd, new);
460     }
461     errno = ENOSYS;
462     return -1;
463 }
464 #endif
465 #endif /* TARGET_NR_renameat2 */
466 
467 #ifdef CONFIG_INOTIFY
468 #include <sys/inotify.h>
469 
470 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
471 static int sys_inotify_init(void)
472 {
473   return (inotify_init());
474 }
475 #endif
476 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
477 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
478 {
479   return (inotify_add_watch(fd, pathname, mask));
480 }
481 #endif
482 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
483 static int sys_inotify_rm_watch(int fd, int32_t wd)
484 {
485   return (inotify_rm_watch(fd, wd));
486 }
487 #endif
488 #ifdef CONFIG_INOTIFY1
489 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
490 static int sys_inotify_init1(int flags)
491 {
492   return (inotify_init1(flags));
493 }
494 #endif
495 #endif
496 #else
497 /* Userspace can usually survive runtime without inotify */
498 #undef TARGET_NR_inotify_init
499 #undef TARGET_NR_inotify_init1
500 #undef TARGET_NR_inotify_add_watch
501 #undef TARGET_NR_inotify_rm_watch
502 #endif /* CONFIG_INOTIFY  */
503 
504 #if defined(TARGET_NR_prlimit64)
505 #ifndef __NR_prlimit64
506 # define __NR_prlimit64 -1
507 #endif
508 #define __NR_sys_prlimit64 __NR_prlimit64
509 /* The glibc rlimit structure may not be that used by the underlying syscall */
510 struct host_rlimit64 {
511     uint64_t rlim_cur;
512     uint64_t rlim_max;
513 };
514 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
515           const struct host_rlimit64 *, new_limit,
516           struct host_rlimit64 *, old_limit)
517 #endif
518 
519 
520 #if defined(TARGET_NR_timer_create)
521 /* Maximum of 32 active POSIX timers allowed at any one time. */
522 static timer_t g_posix_timers[32] = { 0, } ;
523 
524 static inline int next_free_host_timer(void)
525 {
526     int k ;
527     /* FIXME: Does finding the next free slot require a lock? */
528     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
529         if (g_posix_timers[k] == 0) {
530             g_posix_timers[k] = (timer_t) 1;
531             return k;
532         }
533     }
534     return -1;
535 }
536 #endif
537 
538 static inline int host_to_target_errno(int host_errno)
539 {
540     switch (host_errno) {
541 #define E(X)  case X: return TARGET_##X;
542 #include "errnos.c.inc"
543 #undef E
544     default:
545         return host_errno;
546     }
547 }
548 
549 static inline int target_to_host_errno(int target_errno)
550 {
551     switch (target_errno) {
552 #define E(X)  case TARGET_##X: return X;
553 #include "errnos.c.inc"
554 #undef E
555     default:
556         return target_errno;
557     }
558 }
559 
560 static inline abi_long get_errno(abi_long ret)
561 {
562     if (ret == -1)
563         return -host_to_target_errno(errno);
564     else
565         return ret;
566 }
567 
568 const char *target_strerror(int err)
569 {
570     if (err == QEMU_ERESTARTSYS) {
571         return "To be restarted";
572     }
573     if (err == QEMU_ESIGRETURN) {
574         return "Successful exit from sigreturn";
575     }
576 
577     return strerror(target_to_host_errno(err));
578 }
579 
580 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
581 {
582     int i;
583     uint8_t b;
584     if (usize <= ksize) {
585         return 1;
586     }
587     for (i = ksize; i < usize; i++) {
588         if (get_user_u8(b, addr + i)) {
589             return -TARGET_EFAULT;
590         }
591         if (b != 0) {
592             return 0;
593         }
594     }
595     return 1;
596 }
597 
598 #define safe_syscall0(type, name) \
599 static type safe_##name(void) \
600 { \
601     return safe_syscall(__NR_##name); \
602 }
603 
604 #define safe_syscall1(type, name, type1, arg1) \
605 static type safe_##name(type1 arg1) \
606 { \
607     return safe_syscall(__NR_##name, arg1); \
608 }
609 
610 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
611 static type safe_##name(type1 arg1, type2 arg2) \
612 { \
613     return safe_syscall(__NR_##name, arg1, arg2); \
614 }
615 
616 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
617 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
618 { \
619     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
620 }
621 
622 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
623     type4, arg4) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
625 { \
626     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
627 }
628 
629 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
630     type4, arg4, type5, arg5) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
632     type5 arg5) \
633 { \
634     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
635 }
636 
637 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
638     type4, arg4, type5, arg5, type6, arg6) \
639 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
640     type5 arg5, type6 arg6) \
641 { \
642     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
643 }
644 
645 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
646 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
647 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
648               int, flags, mode_t, mode)
649 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
650 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
651               struct rusage *, rusage)
652 #endif
653 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
654               int, options, struct rusage *, rusage)
655 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
656 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
657     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
658 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
659               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
660 #endif
661 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
662 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
663               struct timespec *, tsp, const sigset_t *, sigmask,
664               size_t, sigsetsize)
665 #endif
666 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
667               int, maxevents, int, timeout, const sigset_t *, sigmask,
668               size_t, sigsetsize)
669 #if defined(__NR_futex)
670 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
671               const struct timespec *,timeout,int *,uaddr2,int,val3)
672 #endif
673 #if defined(__NR_futex_time64)
674 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
675               const struct timespec *,timeout,int *,uaddr2,int,val3)
676 #endif
677 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
678 safe_syscall2(int, kill, pid_t, pid, int, sig)
679 safe_syscall2(int, tkill, int, tid, int, sig)
680 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
681 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
682 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
683 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
684               unsigned long, pos_l, unsigned long, pos_h)
685 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
686               unsigned long, pos_l, unsigned long, pos_h)
687 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
688               socklen_t, addrlen)
689 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
690               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
691 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
692               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
693 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
694 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
695 safe_syscall2(int, flock, int, fd, int, operation)
696 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
697 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
698               const struct timespec *, uts, size_t, sigsetsize)
699 #endif
700 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
701               int, flags)
702 #if defined(TARGET_NR_nanosleep)
703 safe_syscall2(int, nanosleep, const struct timespec *, req,
704               struct timespec *, rem)
705 #endif
706 #if defined(TARGET_NR_clock_nanosleep) || \
707     defined(TARGET_NR_clock_nanosleep_time64)
708 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
709               const struct timespec *, req, struct timespec *, rem)
710 #endif
711 #ifdef __NR_ipc
712 #ifdef __s390x__
713 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
714               void *, ptr)
715 #else
716 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
717               void *, ptr, long, fifth)
718 #endif
719 #endif
720 #ifdef __NR_msgsnd
721 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
722               int, flags)
723 #endif
724 #ifdef __NR_msgrcv
725 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
726               long, msgtype, int, flags)
727 #endif
728 #ifdef __NR_semtimedop
729 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
730               unsigned, nsops, const struct timespec *, timeout)
731 #endif
732 #if defined(TARGET_NR_mq_timedsend) || \
733     defined(TARGET_NR_mq_timedsend_time64)
734 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
735               size_t, len, unsigned, prio, const struct timespec *, timeout)
736 #endif
737 #if defined(TARGET_NR_mq_timedreceive) || \
738     defined(TARGET_NR_mq_timedreceive_time64)
739 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
740               size_t, len, unsigned *, prio, const struct timespec *, timeout)
741 #endif
742 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
743 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
744               int, outfd, loff_t *, poutoff, size_t, length,
745               unsigned int, flags)
746 #endif
747 
748 /* We do ioctl like this rather than via safe_syscall3 to preserve the
749  * "third argument might be integer or pointer or not present" behaviour of
750  * the libc function.
751  */
752 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
753 /* Similarly for fcntl. Note that callers must always:
754  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
755  *  use the flock64 struct rather than unsuffixed flock
756  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
757  */
758 #ifdef __NR_fcntl64
759 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
760 #else
761 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
762 #endif
763 
764 static inline int host_to_target_sock_type(int host_type)
765 {
766     int target_type;
767 
768     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
769     case SOCK_DGRAM:
770         target_type = TARGET_SOCK_DGRAM;
771         break;
772     case SOCK_STREAM:
773         target_type = TARGET_SOCK_STREAM;
774         break;
775     default:
776         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
777         break;
778     }
779 
780 #if defined(SOCK_CLOEXEC)
781     if (host_type & SOCK_CLOEXEC) {
782         target_type |= TARGET_SOCK_CLOEXEC;
783     }
784 #endif
785 
786 #if defined(SOCK_NONBLOCK)
787     if (host_type & SOCK_NONBLOCK) {
788         target_type |= TARGET_SOCK_NONBLOCK;
789     }
790 #endif
791 
792     return target_type;
793 }
794 
795 static abi_ulong target_brk;
796 static abi_ulong target_original_brk;
797 static abi_ulong brk_page;
798 
799 void target_set_brk(abi_ulong new_brk)
800 {
801     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
802     brk_page = HOST_PAGE_ALIGN(target_brk);
803 }
804 
805 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
806 #define DEBUGF_BRK(message, args...)
807 
808 /* do_brk() must return target values and target errnos. */
809 abi_long do_brk(abi_ulong new_brk)
810 {
811     abi_long mapped_addr;
812     abi_ulong new_alloc_size;
813 
814     /* brk pointers are always untagged */
815 
816     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
817 
818     if (!new_brk) {
819         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
820         return target_brk;
821     }
822     if (new_brk < target_original_brk) {
823         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
824                    target_brk);
825         return target_brk;
826     }
827 
828     /* If the new brk is less than the highest page reserved to the
829      * target heap allocation, set it and we're almost done...  */
830     if (new_brk <= brk_page) {
831         /* Heap contents are initialized to zero, as for anonymous
832          * mapped pages.  */
833         if (new_brk > target_brk) {
834             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
835         }
836 	target_brk = new_brk;
837         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
838 	return target_brk;
839     }
840 
841     /* We need to allocate more memory after the brk... Note that
842      * we don't use MAP_FIXED because that will map over the top of
843      * any existing mapping (like the one with the host libc or qemu
844      * itself); instead we treat "mapped but at wrong address" as
845      * a failure and unmap again.
846      */
847     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
848     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
849                                         PROT_READ|PROT_WRITE,
850                                         MAP_ANON|MAP_PRIVATE, 0, 0));
851 
852     if (mapped_addr == brk_page) {
853         /* Heap contents are initialized to zero, as for anonymous
854          * mapped pages.  Technically the new pages are already
855          * initialized to zero since they *are* anonymous mapped
856          * pages, however we have to take care with the contents that
857          * come from the remaining part of the previous page: it may
858          * contains garbage data due to a previous heap usage (grown
859          * then shrunken).  */
860         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
861 
862         target_brk = new_brk;
863         brk_page = HOST_PAGE_ALIGN(target_brk);
864         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
865             target_brk);
866         return target_brk;
867     } else if (mapped_addr != -1) {
868         /* Mapped but at wrong address, meaning there wasn't actually
869          * enough space for this brk.
870          */
871         target_munmap(mapped_addr, new_alloc_size);
872         mapped_addr = -1;
873         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
874     }
875     else {
876         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
877     }
878 
879 #if defined(TARGET_ALPHA)
880     /* We (partially) emulate OSF/1 on Alpha, which requires we
881        return a proper errno, not an unchanged brk value.  */
882     return -TARGET_ENOMEM;
883 #endif
884     /* For everything else, return the previous break. */
885     return target_brk;
886 }
887 
888 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
889     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
890 static inline abi_long copy_from_user_fdset(fd_set *fds,
891                                             abi_ulong target_fds_addr,
892                                             int n)
893 {
894     int i, nw, j, k;
895     abi_ulong b, *target_fds;
896 
897     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
898     if (!(target_fds = lock_user(VERIFY_READ,
899                                  target_fds_addr,
900                                  sizeof(abi_ulong) * nw,
901                                  1)))
902         return -TARGET_EFAULT;
903 
904     FD_ZERO(fds);
905     k = 0;
906     for (i = 0; i < nw; i++) {
907         /* grab the abi_ulong */
908         __get_user(b, &target_fds[i]);
909         for (j = 0; j < TARGET_ABI_BITS; j++) {
910             /* check the bit inside the abi_ulong */
911             if ((b >> j) & 1)
912                 FD_SET(k, fds);
913             k++;
914         }
915     }
916 
917     unlock_user(target_fds, target_fds_addr, 0);
918 
919     return 0;
920 }
921 
922 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
923                                                  abi_ulong target_fds_addr,
924                                                  int n)
925 {
926     if (target_fds_addr) {
927         if (copy_from_user_fdset(fds, target_fds_addr, n))
928             return -TARGET_EFAULT;
929         *fds_ptr = fds;
930     } else {
931         *fds_ptr = NULL;
932     }
933     return 0;
934 }
935 
936 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
937                                           const fd_set *fds,
938                                           int n)
939 {
940     int i, nw, j, k;
941     abi_long v;
942     abi_ulong *target_fds;
943 
944     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
945     if (!(target_fds = lock_user(VERIFY_WRITE,
946                                  target_fds_addr,
947                                  sizeof(abi_ulong) * nw,
948                                  0)))
949         return -TARGET_EFAULT;
950 
951     k = 0;
952     for (i = 0; i < nw; i++) {
953         v = 0;
954         for (j = 0; j < TARGET_ABI_BITS; j++) {
955             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
956             k++;
957         }
958         __put_user(v, &target_fds[i]);
959     }
960 
961     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
962 
963     return 0;
964 }
965 #endif
966 
967 #if defined(__alpha__)
968 #define HOST_HZ 1024
969 #else
970 #define HOST_HZ 100
971 #endif
972 
973 static inline abi_long host_to_target_clock_t(long ticks)
974 {
975 #if HOST_HZ == TARGET_HZ
976     return ticks;
977 #else
978     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
979 #endif
980 }
981 
982 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
983                                              const struct rusage *rusage)
984 {
985     struct target_rusage *target_rusage;
986 
987     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
988         return -TARGET_EFAULT;
989     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
990     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
991     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
992     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
993     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
994     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
995     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
996     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
997     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
998     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
999     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1000     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1001     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1002     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1003     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1004     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1005     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1006     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1007     unlock_user_struct(target_rusage, target_addr, 1);
1008 
1009     return 0;
1010 }
1011 
1012 #ifdef TARGET_NR_setrlimit
1013 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1014 {
1015     abi_ulong target_rlim_swap;
1016     rlim_t result;
1017 
1018     target_rlim_swap = tswapal(target_rlim);
1019     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1020         return RLIM_INFINITY;
1021 
1022     result = target_rlim_swap;
1023     if (target_rlim_swap != (rlim_t)result)
1024         return RLIM_INFINITY;
1025 
1026     return result;
1027 }
1028 #endif
1029 
1030 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1031 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1032 {
1033     abi_ulong target_rlim_swap;
1034     abi_ulong result;
1035 
1036     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1037         target_rlim_swap = TARGET_RLIM_INFINITY;
1038     else
1039         target_rlim_swap = rlim;
1040     result = tswapal(target_rlim_swap);
1041 
1042     return result;
1043 }
1044 #endif
1045 
1046 static inline int target_to_host_resource(int code)
1047 {
1048     switch (code) {
1049     case TARGET_RLIMIT_AS:
1050         return RLIMIT_AS;
1051     case TARGET_RLIMIT_CORE:
1052         return RLIMIT_CORE;
1053     case TARGET_RLIMIT_CPU:
1054         return RLIMIT_CPU;
1055     case TARGET_RLIMIT_DATA:
1056         return RLIMIT_DATA;
1057     case TARGET_RLIMIT_FSIZE:
1058         return RLIMIT_FSIZE;
1059     case TARGET_RLIMIT_LOCKS:
1060         return RLIMIT_LOCKS;
1061     case TARGET_RLIMIT_MEMLOCK:
1062         return RLIMIT_MEMLOCK;
1063     case TARGET_RLIMIT_MSGQUEUE:
1064         return RLIMIT_MSGQUEUE;
1065     case TARGET_RLIMIT_NICE:
1066         return RLIMIT_NICE;
1067     case TARGET_RLIMIT_NOFILE:
1068         return RLIMIT_NOFILE;
1069     case TARGET_RLIMIT_NPROC:
1070         return RLIMIT_NPROC;
1071     case TARGET_RLIMIT_RSS:
1072         return RLIMIT_RSS;
1073     case TARGET_RLIMIT_RTPRIO:
1074         return RLIMIT_RTPRIO;
1075     case TARGET_RLIMIT_SIGPENDING:
1076         return RLIMIT_SIGPENDING;
1077     case TARGET_RLIMIT_STACK:
1078         return RLIMIT_STACK;
1079     default:
1080         return code;
1081     }
1082 }
1083 
1084 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1085                                               abi_ulong target_tv_addr)
1086 {
1087     struct target_timeval *target_tv;
1088 
1089     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1090         return -TARGET_EFAULT;
1091     }
1092 
1093     __get_user(tv->tv_sec, &target_tv->tv_sec);
1094     __get_user(tv->tv_usec, &target_tv->tv_usec);
1095 
1096     unlock_user_struct(target_tv, target_tv_addr, 0);
1097 
1098     return 0;
1099 }
1100 
1101 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1102                                             const struct timeval *tv)
1103 {
1104     struct target_timeval *target_tv;
1105 
1106     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1107         return -TARGET_EFAULT;
1108     }
1109 
1110     __put_user(tv->tv_sec, &target_tv->tv_sec);
1111     __put_user(tv->tv_usec, &target_tv->tv_usec);
1112 
1113     unlock_user_struct(target_tv, target_tv_addr, 1);
1114 
1115     return 0;
1116 }
1117 
1118 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1119 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1120                                                 abi_ulong target_tv_addr)
1121 {
1122     struct target__kernel_sock_timeval *target_tv;
1123 
1124     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1125         return -TARGET_EFAULT;
1126     }
1127 
1128     __get_user(tv->tv_sec, &target_tv->tv_sec);
1129     __get_user(tv->tv_usec, &target_tv->tv_usec);
1130 
1131     unlock_user_struct(target_tv, target_tv_addr, 0);
1132 
1133     return 0;
1134 }
1135 #endif
1136 
1137 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1138                                               const struct timeval *tv)
1139 {
1140     struct target__kernel_sock_timeval *target_tv;
1141 
1142     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1143         return -TARGET_EFAULT;
1144     }
1145 
1146     __put_user(tv->tv_sec, &target_tv->tv_sec);
1147     __put_user(tv->tv_usec, &target_tv->tv_usec);
1148 
1149     unlock_user_struct(target_tv, target_tv_addr, 1);
1150 
1151     return 0;
1152 }
1153 
1154 #if defined(TARGET_NR_futex) || \
1155     defined(TARGET_NR_rt_sigtimedwait) || \
1156     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1157     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1158     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1159     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1160     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1161     defined(TARGET_NR_timer_settime) || \
1162     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1163 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1164                                                abi_ulong target_addr)
1165 {
1166     struct target_timespec *target_ts;
1167 
1168     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1169         return -TARGET_EFAULT;
1170     }
1171     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1172     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1173     unlock_user_struct(target_ts, target_addr, 0);
1174     return 0;
1175 }
1176 #endif
1177 
1178 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1179     defined(TARGET_NR_timer_settime64) || \
1180     defined(TARGET_NR_mq_timedsend_time64) || \
1181     defined(TARGET_NR_mq_timedreceive_time64) || \
1182     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1183     defined(TARGET_NR_clock_nanosleep_time64) || \
1184     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1185     defined(TARGET_NR_utimensat) || \
1186     defined(TARGET_NR_utimensat_time64) || \
1187     defined(TARGET_NR_semtimedop_time64) || \
1188     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1189 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1190                                                  abi_ulong target_addr)
1191 {
1192     struct target__kernel_timespec *target_ts;
1193 
1194     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1195         return -TARGET_EFAULT;
1196     }
1197     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1198     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1199     /* in 32bit mode, this drops the padding */
1200     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1201     unlock_user_struct(target_ts, target_addr, 0);
1202     return 0;
1203 }
1204 #endif
1205 
1206 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1207                                                struct timespec *host_ts)
1208 {
1209     struct target_timespec *target_ts;
1210 
1211     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1212         return -TARGET_EFAULT;
1213     }
1214     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1215     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1216     unlock_user_struct(target_ts, target_addr, 1);
1217     return 0;
1218 }
1219 
1220 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1221                                                  struct timespec *host_ts)
1222 {
1223     struct target__kernel_timespec *target_ts;
1224 
1225     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1226         return -TARGET_EFAULT;
1227     }
1228     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1229     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1230     unlock_user_struct(target_ts, target_addr, 1);
1231     return 0;
1232 }
1233 
1234 #if defined(TARGET_NR_gettimeofday)
1235 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1236                                              struct timezone *tz)
1237 {
1238     struct target_timezone *target_tz;
1239 
1240     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1241         return -TARGET_EFAULT;
1242     }
1243 
1244     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1245     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1246 
1247     unlock_user_struct(target_tz, target_tz_addr, 1);
1248 
1249     return 0;
1250 }
1251 #endif
1252 
1253 #if defined(TARGET_NR_settimeofday)
1254 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1255                                                abi_ulong target_tz_addr)
1256 {
1257     struct target_timezone *target_tz;
1258 
1259     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1260         return -TARGET_EFAULT;
1261     }
1262 
1263     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1264     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1265 
1266     unlock_user_struct(target_tz, target_tz_addr, 0);
1267 
1268     return 0;
1269 }
1270 #endif
1271 
1272 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1273 #include <mqueue.h>
1274 
1275 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1276                                               abi_ulong target_mq_attr_addr)
1277 {
1278     struct target_mq_attr *target_mq_attr;
1279 
1280     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1281                           target_mq_attr_addr, 1))
1282         return -TARGET_EFAULT;
1283 
1284     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1285     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1286     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1287     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1288 
1289     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1290 
1291     return 0;
1292 }
1293 
1294 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1295                                             const struct mq_attr *attr)
1296 {
1297     struct target_mq_attr *target_mq_attr;
1298 
1299     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1300                           target_mq_attr_addr, 0))
1301         return -TARGET_EFAULT;
1302 
1303     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1304     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1305     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1306     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1307 
1308     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1309 
1310     return 0;
1311 }
1312 #endif
1313 
1314 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1315 /* do_select() must return target values and target errnos. */
1316 static abi_long do_select(int n,
1317                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1318                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1319 {
1320     fd_set rfds, wfds, efds;
1321     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1322     struct timeval tv;
1323     struct timespec ts, *ts_ptr;
1324     abi_long ret;
1325 
1326     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1327     if (ret) {
1328         return ret;
1329     }
1330     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1331     if (ret) {
1332         return ret;
1333     }
1334     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1335     if (ret) {
1336         return ret;
1337     }
1338 
1339     if (target_tv_addr) {
1340         if (copy_from_user_timeval(&tv, target_tv_addr))
1341             return -TARGET_EFAULT;
1342         ts.tv_sec = tv.tv_sec;
1343         ts.tv_nsec = tv.tv_usec * 1000;
1344         ts_ptr = &ts;
1345     } else {
1346         ts_ptr = NULL;
1347     }
1348 
1349     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1350                                   ts_ptr, NULL));
1351 
1352     if (!is_error(ret)) {
1353         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1354             return -TARGET_EFAULT;
1355         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1356             return -TARGET_EFAULT;
1357         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1358             return -TARGET_EFAULT;
1359 
1360         if (target_tv_addr) {
1361             tv.tv_sec = ts.tv_sec;
1362             tv.tv_usec = ts.tv_nsec / 1000;
1363             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1364                 return -TARGET_EFAULT;
1365             }
1366         }
1367     }
1368 
1369     return ret;
1370 }
1371 
1372 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1373 static abi_long do_old_select(abi_ulong arg1)
1374 {
1375     struct target_sel_arg_struct *sel;
1376     abi_ulong inp, outp, exp, tvp;
1377     long nsel;
1378 
1379     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1380         return -TARGET_EFAULT;
1381     }
1382 
1383     nsel = tswapal(sel->n);
1384     inp = tswapal(sel->inp);
1385     outp = tswapal(sel->outp);
1386     exp = tswapal(sel->exp);
1387     tvp = tswapal(sel->tvp);
1388 
1389     unlock_user_struct(sel, arg1, 0);
1390 
1391     return do_select(nsel, inp, outp, exp, tvp);
1392 }
1393 #endif
1394 #endif
1395 
1396 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1397 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1398                             abi_long arg4, abi_long arg5, abi_long arg6,
1399                             bool time64)
1400 {
1401     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1402     fd_set rfds, wfds, efds;
1403     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1404     struct timespec ts, *ts_ptr;
1405     abi_long ret;
1406 
1407     /*
1408      * The 6th arg is actually two args smashed together,
1409      * so we cannot use the C library.
1410      */
1411     sigset_t set;
1412     struct {
1413         sigset_t *set;
1414         size_t size;
1415     } sig, *sig_ptr;
1416 
1417     abi_ulong arg_sigset, arg_sigsize, *arg7;
1418     target_sigset_t *target_sigset;
1419 
1420     n = arg1;
1421     rfd_addr = arg2;
1422     wfd_addr = arg3;
1423     efd_addr = arg4;
1424     ts_addr = arg5;
1425 
1426     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1427     if (ret) {
1428         return ret;
1429     }
1430     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1431     if (ret) {
1432         return ret;
1433     }
1434     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1435     if (ret) {
1436         return ret;
1437     }
1438 
1439     /*
1440      * This takes a timespec, and not a timeval, so we cannot
1441      * use the do_select() helper ...
1442      */
1443     if (ts_addr) {
1444         if (time64) {
1445             if (target_to_host_timespec64(&ts, ts_addr)) {
1446                 return -TARGET_EFAULT;
1447             }
1448         } else {
1449             if (target_to_host_timespec(&ts, ts_addr)) {
1450                 return -TARGET_EFAULT;
1451             }
1452         }
1453             ts_ptr = &ts;
1454     } else {
1455         ts_ptr = NULL;
1456     }
1457 
1458     /* Extract the two packed args for the sigset */
1459     if (arg6) {
1460         sig_ptr = &sig;
1461         sig.size = SIGSET_T_SIZE;
1462 
1463         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1464         if (!arg7) {
1465             return -TARGET_EFAULT;
1466         }
1467         arg_sigset = tswapal(arg7[0]);
1468         arg_sigsize = tswapal(arg7[1]);
1469         unlock_user(arg7, arg6, 0);
1470 
1471         if (arg_sigset) {
1472             sig.set = &set;
1473             if (arg_sigsize != sizeof(*target_sigset)) {
1474                 /* Like the kernel, we enforce correct size sigsets */
1475                 return -TARGET_EINVAL;
1476             }
1477             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1478                                       sizeof(*target_sigset), 1);
1479             if (!target_sigset) {
1480                 return -TARGET_EFAULT;
1481             }
1482             target_to_host_sigset(&set, target_sigset);
1483             unlock_user(target_sigset, arg_sigset, 0);
1484         } else {
1485             sig.set = NULL;
1486         }
1487     } else {
1488         sig_ptr = NULL;
1489     }
1490 
1491     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1492                                   ts_ptr, sig_ptr));
1493 
1494     if (!is_error(ret)) {
1495         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1496             return -TARGET_EFAULT;
1497         }
1498         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1499             return -TARGET_EFAULT;
1500         }
1501         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1502             return -TARGET_EFAULT;
1503         }
1504         if (time64) {
1505             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1506                 return -TARGET_EFAULT;
1507             }
1508         } else {
1509             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1510                 return -TARGET_EFAULT;
1511             }
1512         }
1513     }
1514     return ret;
1515 }
1516 #endif
1517 
1518 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1519     defined(TARGET_NR_ppoll_time64)
1520 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1521                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1522 {
1523     struct target_pollfd *target_pfd;
1524     unsigned int nfds = arg2;
1525     struct pollfd *pfd;
1526     unsigned int i;
1527     abi_long ret;
1528 
1529     pfd = NULL;
1530     target_pfd = NULL;
1531     if (nfds) {
1532         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1533             return -TARGET_EINVAL;
1534         }
1535         target_pfd = lock_user(VERIFY_WRITE, arg1,
1536                                sizeof(struct target_pollfd) * nfds, 1);
1537         if (!target_pfd) {
1538             return -TARGET_EFAULT;
1539         }
1540 
1541         pfd = alloca(sizeof(struct pollfd) * nfds);
1542         for (i = 0; i < nfds; i++) {
1543             pfd[i].fd = tswap32(target_pfd[i].fd);
1544             pfd[i].events = tswap16(target_pfd[i].events);
1545         }
1546     }
1547     if (ppoll) {
1548         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1549         target_sigset_t *target_set;
1550         sigset_t _set, *set = &_set;
1551 
1552         if (arg3) {
1553             if (time64) {
1554                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1555                     unlock_user(target_pfd, arg1, 0);
1556                     return -TARGET_EFAULT;
1557                 }
1558             } else {
1559                 if (target_to_host_timespec(timeout_ts, arg3)) {
1560                     unlock_user(target_pfd, arg1, 0);
1561                     return -TARGET_EFAULT;
1562                 }
1563             }
1564         } else {
1565             timeout_ts = NULL;
1566         }
1567 
1568         if (arg4) {
1569             if (arg5 != sizeof(target_sigset_t)) {
1570                 unlock_user(target_pfd, arg1, 0);
1571                 return -TARGET_EINVAL;
1572             }
1573 
1574             target_set = lock_user(VERIFY_READ, arg4,
1575                                    sizeof(target_sigset_t), 1);
1576             if (!target_set) {
1577                 unlock_user(target_pfd, arg1, 0);
1578                 return -TARGET_EFAULT;
1579             }
1580             target_to_host_sigset(set, target_set);
1581         } else {
1582             set = NULL;
1583         }
1584 
1585         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1586                                    set, SIGSET_T_SIZE));
1587 
1588         if (!is_error(ret) && arg3) {
1589             if (time64) {
1590                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1591                     return -TARGET_EFAULT;
1592                 }
1593             } else {
1594                 if (host_to_target_timespec(arg3, timeout_ts)) {
1595                     return -TARGET_EFAULT;
1596                 }
1597             }
1598         }
1599         if (arg4) {
1600             unlock_user(target_set, arg4, 0);
1601         }
1602     } else {
1603           struct timespec ts, *pts;
1604 
1605           if (arg3 >= 0) {
1606               /* Convert ms to secs, ns */
1607               ts.tv_sec = arg3 / 1000;
1608               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1609               pts = &ts;
1610           } else {
1611               /* -ve poll() timeout means "infinite" */
1612               pts = NULL;
1613           }
1614           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1615     }
1616 
1617     if (!is_error(ret)) {
1618         for (i = 0; i < nfds; i++) {
1619             target_pfd[i].revents = tswap16(pfd[i].revents);
1620         }
1621     }
1622     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1623     return ret;
1624 }
1625 #endif
1626 
1627 static abi_long do_pipe2(int host_pipe[], int flags)
1628 {
1629 #ifdef CONFIG_PIPE2
1630     return pipe2(host_pipe, flags);
1631 #else
1632     return -ENOSYS;
1633 #endif
1634 }
1635 
1636 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1637                         int flags, int is_pipe2)
1638 {
1639     int host_pipe[2];
1640     abi_long ret;
1641     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1642 
1643     if (is_error(ret))
1644         return get_errno(ret);
1645 
1646     /* Several targets have special calling conventions for the original
1647        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1648     if (!is_pipe2) {
1649 #if defined(TARGET_ALPHA)
1650         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1651         return host_pipe[0];
1652 #elif defined(TARGET_MIPS)
1653         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1654         return host_pipe[0];
1655 #elif defined(TARGET_SH4)
1656         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1657         return host_pipe[0];
1658 #elif defined(TARGET_SPARC)
1659         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1660         return host_pipe[0];
1661 #endif
1662     }
1663 
1664     if (put_user_s32(host_pipe[0], pipedes)
1665         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1666         return -TARGET_EFAULT;
1667     return get_errno(ret);
1668 }
1669 
1670 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1671                                               abi_ulong target_addr,
1672                                               socklen_t len)
1673 {
1674     struct target_ip_mreqn *target_smreqn;
1675 
1676     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1677     if (!target_smreqn)
1678         return -TARGET_EFAULT;
1679     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1680     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1681     if (len == sizeof(struct target_ip_mreqn))
1682         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1683     unlock_user(target_smreqn, target_addr, 0);
1684 
1685     return 0;
1686 }
1687 
1688 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1689                                                abi_ulong target_addr,
1690                                                socklen_t len)
1691 {
1692     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1693     sa_family_t sa_family;
1694     struct target_sockaddr *target_saddr;
1695 
1696     if (fd_trans_target_to_host_addr(fd)) {
1697         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1698     }
1699 
1700     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1701     if (!target_saddr)
1702         return -TARGET_EFAULT;
1703 
1704     sa_family = tswap16(target_saddr->sa_family);
1705 
1706     /* Oops. The caller might send a incomplete sun_path; sun_path
1707      * must be terminated by \0 (see the manual page), but
1708      * unfortunately it is quite common to specify sockaddr_un
1709      * length as "strlen(x->sun_path)" while it should be
1710      * "strlen(...) + 1". We'll fix that here if needed.
1711      * Linux kernel has a similar feature.
1712      */
1713 
1714     if (sa_family == AF_UNIX) {
1715         if (len < unix_maxlen && len > 0) {
1716             char *cp = (char*)target_saddr;
1717 
1718             if ( cp[len-1] && !cp[len] )
1719                 len++;
1720         }
1721         if (len > unix_maxlen)
1722             len = unix_maxlen;
1723     }
1724 
1725     memcpy(addr, target_saddr, len);
1726     addr->sa_family = sa_family;
1727     if (sa_family == AF_NETLINK) {
1728         struct sockaddr_nl *nladdr;
1729 
1730         nladdr = (struct sockaddr_nl *)addr;
1731         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1732         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1733     } else if (sa_family == AF_PACKET) {
1734 	struct target_sockaddr_ll *lladdr;
1735 
1736 	lladdr = (struct target_sockaddr_ll *)addr;
1737 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1738 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1739     }
1740     unlock_user(target_saddr, target_addr, 0);
1741 
1742     return 0;
1743 }
1744 
1745 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1746                                                struct sockaddr *addr,
1747                                                socklen_t len)
1748 {
1749     struct target_sockaddr *target_saddr;
1750 
1751     if (len == 0) {
1752         return 0;
1753     }
1754     assert(addr);
1755 
1756     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1757     if (!target_saddr)
1758         return -TARGET_EFAULT;
1759     memcpy(target_saddr, addr, len);
1760     if (len >= offsetof(struct target_sockaddr, sa_family) +
1761         sizeof(target_saddr->sa_family)) {
1762         target_saddr->sa_family = tswap16(addr->sa_family);
1763     }
1764     if (addr->sa_family == AF_NETLINK &&
1765         len >= sizeof(struct target_sockaddr_nl)) {
1766         struct target_sockaddr_nl *target_nl =
1767                (struct target_sockaddr_nl *)target_saddr;
1768         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1769         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1770     } else if (addr->sa_family == AF_PACKET) {
1771         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1772         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1773         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1774     } else if (addr->sa_family == AF_INET6 &&
1775                len >= sizeof(struct target_sockaddr_in6)) {
1776         struct target_sockaddr_in6 *target_in6 =
1777                (struct target_sockaddr_in6 *)target_saddr;
1778         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1779     }
1780     unlock_user(target_saddr, target_addr, len);
1781 
1782     return 0;
1783 }
1784 
1785 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1786                                            struct target_msghdr *target_msgh)
1787 {
1788     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1789     abi_long msg_controllen;
1790     abi_ulong target_cmsg_addr;
1791     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1792     socklen_t space = 0;
1793 
1794     msg_controllen = tswapal(target_msgh->msg_controllen);
1795     if (msg_controllen < sizeof (struct target_cmsghdr))
1796         goto the_end;
1797     target_cmsg_addr = tswapal(target_msgh->msg_control);
1798     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1799     target_cmsg_start = target_cmsg;
1800     if (!target_cmsg)
1801         return -TARGET_EFAULT;
1802 
1803     while (cmsg && target_cmsg) {
1804         void *data = CMSG_DATA(cmsg);
1805         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1806 
1807         int len = tswapal(target_cmsg->cmsg_len)
1808             - sizeof(struct target_cmsghdr);
1809 
1810         space += CMSG_SPACE(len);
1811         if (space > msgh->msg_controllen) {
1812             space -= CMSG_SPACE(len);
1813             /* This is a QEMU bug, since we allocated the payload
1814              * area ourselves (unlike overflow in host-to-target
1815              * conversion, which is just the guest giving us a buffer
1816              * that's too small). It can't happen for the payload types
1817              * we currently support; if it becomes an issue in future
1818              * we would need to improve our allocation strategy to
1819              * something more intelligent than "twice the size of the
1820              * target buffer we're reading from".
1821              */
1822             qemu_log_mask(LOG_UNIMP,
1823                           ("Unsupported ancillary data %d/%d: "
1824                            "unhandled msg size\n"),
1825                           tswap32(target_cmsg->cmsg_level),
1826                           tswap32(target_cmsg->cmsg_type));
1827             break;
1828         }
1829 
1830         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1831             cmsg->cmsg_level = SOL_SOCKET;
1832         } else {
1833             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1834         }
1835         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1836         cmsg->cmsg_len = CMSG_LEN(len);
1837 
1838         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1839             int *fd = (int *)data;
1840             int *target_fd = (int *)target_data;
1841             int i, numfds = len / sizeof(int);
1842 
1843             for (i = 0; i < numfds; i++) {
1844                 __get_user(fd[i], target_fd + i);
1845             }
1846         } else if (cmsg->cmsg_level == SOL_SOCKET
1847                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1848             struct ucred *cred = (struct ucred *)data;
1849             struct target_ucred *target_cred =
1850                 (struct target_ucred *)target_data;
1851 
1852             __get_user(cred->pid, &target_cred->pid);
1853             __get_user(cred->uid, &target_cred->uid);
1854             __get_user(cred->gid, &target_cred->gid);
1855         } else {
1856             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1857                           cmsg->cmsg_level, cmsg->cmsg_type);
1858             memcpy(data, target_data, len);
1859         }
1860 
1861         cmsg = CMSG_NXTHDR(msgh, cmsg);
1862         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1863                                          target_cmsg_start);
1864     }
1865     unlock_user(target_cmsg, target_cmsg_addr, 0);
1866  the_end:
1867     msgh->msg_controllen = space;
1868     return 0;
1869 }
1870 
1871 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1872                                            struct msghdr *msgh)
1873 {
1874     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1875     abi_long msg_controllen;
1876     abi_ulong target_cmsg_addr;
1877     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1878     socklen_t space = 0;
1879 
1880     msg_controllen = tswapal(target_msgh->msg_controllen);
1881     if (msg_controllen < sizeof (struct target_cmsghdr))
1882         goto the_end;
1883     target_cmsg_addr = tswapal(target_msgh->msg_control);
1884     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1885     target_cmsg_start = target_cmsg;
1886     if (!target_cmsg)
1887         return -TARGET_EFAULT;
1888 
1889     while (cmsg && target_cmsg) {
1890         void *data = CMSG_DATA(cmsg);
1891         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1892 
1893         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1894         int tgt_len, tgt_space;
1895 
1896         /* We never copy a half-header but may copy half-data;
1897          * this is Linux's behaviour in put_cmsg(). Note that
1898          * truncation here is a guest problem (which we report
1899          * to the guest via the CTRUNC bit), unlike truncation
1900          * in target_to_host_cmsg, which is a QEMU bug.
1901          */
1902         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1903             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1904             break;
1905         }
1906 
1907         if (cmsg->cmsg_level == SOL_SOCKET) {
1908             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1909         } else {
1910             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1911         }
1912         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1913 
1914         /* Payload types which need a different size of payload on
1915          * the target must adjust tgt_len here.
1916          */
1917         tgt_len = len;
1918         switch (cmsg->cmsg_level) {
1919         case SOL_SOCKET:
1920             switch (cmsg->cmsg_type) {
1921             case SO_TIMESTAMP:
1922                 tgt_len = sizeof(struct target_timeval);
1923                 break;
1924             default:
1925                 break;
1926             }
1927             break;
1928         default:
1929             break;
1930         }
1931 
1932         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1933             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1934             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1935         }
1936 
1937         /* We must now copy-and-convert len bytes of payload
1938          * into tgt_len bytes of destination space. Bear in mind
1939          * that in both source and destination we may be dealing
1940          * with a truncated value!
1941          */
1942         switch (cmsg->cmsg_level) {
1943         case SOL_SOCKET:
1944             switch (cmsg->cmsg_type) {
1945             case SCM_RIGHTS:
1946             {
1947                 int *fd = (int *)data;
1948                 int *target_fd = (int *)target_data;
1949                 int i, numfds = tgt_len / sizeof(int);
1950 
1951                 for (i = 0; i < numfds; i++) {
1952                     __put_user(fd[i], target_fd + i);
1953                 }
1954                 break;
1955             }
1956             case SO_TIMESTAMP:
1957             {
1958                 struct timeval *tv = (struct timeval *)data;
1959                 struct target_timeval *target_tv =
1960                     (struct target_timeval *)target_data;
1961 
1962                 if (len != sizeof(struct timeval) ||
1963                     tgt_len != sizeof(struct target_timeval)) {
1964                     goto unimplemented;
1965                 }
1966 
1967                 /* copy struct timeval to target */
1968                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1969                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1970                 break;
1971             }
1972             case SCM_CREDENTIALS:
1973             {
1974                 struct ucred *cred = (struct ucred *)data;
1975                 struct target_ucred *target_cred =
1976                     (struct target_ucred *)target_data;
1977 
1978                 __put_user(cred->pid, &target_cred->pid);
1979                 __put_user(cred->uid, &target_cred->uid);
1980                 __put_user(cred->gid, &target_cred->gid);
1981                 break;
1982             }
1983             default:
1984                 goto unimplemented;
1985             }
1986             break;
1987 
1988         case SOL_IP:
1989             switch (cmsg->cmsg_type) {
1990             case IP_TTL:
1991             {
1992                 uint32_t *v = (uint32_t *)data;
1993                 uint32_t *t_int = (uint32_t *)target_data;
1994 
1995                 if (len != sizeof(uint32_t) ||
1996                     tgt_len != sizeof(uint32_t)) {
1997                     goto unimplemented;
1998                 }
1999                 __put_user(*v, t_int);
2000                 break;
2001             }
2002             case IP_RECVERR:
2003             {
2004                 struct errhdr_t {
2005                    struct sock_extended_err ee;
2006                    struct sockaddr_in offender;
2007                 };
2008                 struct errhdr_t *errh = (struct errhdr_t *)data;
2009                 struct errhdr_t *target_errh =
2010                     (struct errhdr_t *)target_data;
2011 
2012                 if (len != sizeof(struct errhdr_t) ||
2013                     tgt_len != sizeof(struct errhdr_t)) {
2014                     goto unimplemented;
2015                 }
2016                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2017                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2018                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2019                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2020                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2021                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2022                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2023                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2024                     (void *) &errh->offender, sizeof(errh->offender));
2025                 break;
2026             }
2027             default:
2028                 goto unimplemented;
2029             }
2030             break;
2031 
2032         case SOL_IPV6:
2033             switch (cmsg->cmsg_type) {
2034             case IPV6_HOPLIMIT:
2035             {
2036                 uint32_t *v = (uint32_t *)data;
2037                 uint32_t *t_int = (uint32_t *)target_data;
2038 
2039                 if (len != sizeof(uint32_t) ||
2040                     tgt_len != sizeof(uint32_t)) {
2041                     goto unimplemented;
2042                 }
2043                 __put_user(*v, t_int);
2044                 break;
2045             }
2046             case IPV6_RECVERR:
2047             {
2048                 struct errhdr6_t {
2049                    struct sock_extended_err ee;
2050                    struct sockaddr_in6 offender;
2051                 };
2052                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2053                 struct errhdr6_t *target_errh =
2054                     (struct errhdr6_t *)target_data;
2055 
2056                 if (len != sizeof(struct errhdr6_t) ||
2057                     tgt_len != sizeof(struct errhdr6_t)) {
2058                     goto unimplemented;
2059                 }
2060                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2061                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2062                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2063                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2064                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2065                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2066                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2067                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2068                     (void *) &errh->offender, sizeof(errh->offender));
2069                 break;
2070             }
2071             default:
2072                 goto unimplemented;
2073             }
2074             break;
2075 
2076         default:
2077         unimplemented:
2078             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2079                           cmsg->cmsg_level, cmsg->cmsg_type);
2080             memcpy(target_data, data, MIN(len, tgt_len));
2081             if (tgt_len > len) {
2082                 memset(target_data + len, 0, tgt_len - len);
2083             }
2084         }
2085 
2086         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2087         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2088         if (msg_controllen < tgt_space) {
2089             tgt_space = msg_controllen;
2090         }
2091         msg_controllen -= tgt_space;
2092         space += tgt_space;
2093         cmsg = CMSG_NXTHDR(msgh, cmsg);
2094         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2095                                          target_cmsg_start);
2096     }
2097     unlock_user(target_cmsg, target_cmsg_addr, space);
2098  the_end:
2099     target_msgh->msg_controllen = tswapal(space);
2100     return 0;
2101 }
2102 
2103 /* do_setsockopt() Must return target values and target errnos. */
2104 static abi_long do_setsockopt(int sockfd, int level, int optname,
2105                               abi_ulong optval_addr, socklen_t optlen)
2106 {
2107     abi_long ret;
2108     int val;
2109     struct ip_mreqn *ip_mreq;
2110     struct ip_mreq_source *ip_mreq_source;
2111 
2112     switch(level) {
2113     case SOL_TCP:
2114     case SOL_UDP:
2115         /* TCP and UDP options all take an 'int' value.  */
2116         if (optlen < sizeof(uint32_t))
2117             return -TARGET_EINVAL;
2118 
2119         if (get_user_u32(val, optval_addr))
2120             return -TARGET_EFAULT;
2121         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2122         break;
2123     case SOL_IP:
2124         switch(optname) {
2125         case IP_TOS:
2126         case IP_TTL:
2127         case IP_HDRINCL:
2128         case IP_ROUTER_ALERT:
2129         case IP_RECVOPTS:
2130         case IP_RETOPTS:
2131         case IP_PKTINFO:
2132         case IP_MTU_DISCOVER:
2133         case IP_RECVERR:
2134         case IP_RECVTTL:
2135         case IP_RECVTOS:
2136 #ifdef IP_FREEBIND
2137         case IP_FREEBIND:
2138 #endif
2139         case IP_MULTICAST_TTL:
2140         case IP_MULTICAST_LOOP:
2141             val = 0;
2142             if (optlen >= sizeof(uint32_t)) {
2143                 if (get_user_u32(val, optval_addr))
2144                     return -TARGET_EFAULT;
2145             } else if (optlen >= 1) {
2146                 if (get_user_u8(val, optval_addr))
2147                     return -TARGET_EFAULT;
2148             }
2149             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2150             break;
2151         case IP_ADD_MEMBERSHIP:
2152         case IP_DROP_MEMBERSHIP:
2153             if (optlen < sizeof (struct target_ip_mreq) ||
2154                 optlen > sizeof (struct target_ip_mreqn))
2155                 return -TARGET_EINVAL;
2156 
2157             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2158             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2159             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2160             break;
2161 
2162         case IP_BLOCK_SOURCE:
2163         case IP_UNBLOCK_SOURCE:
2164         case IP_ADD_SOURCE_MEMBERSHIP:
2165         case IP_DROP_SOURCE_MEMBERSHIP:
2166             if (optlen != sizeof (struct target_ip_mreq_source))
2167                 return -TARGET_EINVAL;
2168 
2169             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2170             if (!ip_mreq_source) {
2171                 return -TARGET_EFAULT;
2172             }
2173             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2174             unlock_user (ip_mreq_source, optval_addr, 0);
2175             break;
2176 
2177         default:
2178             goto unimplemented;
2179         }
2180         break;
2181     case SOL_IPV6:
2182         switch (optname) {
2183         case IPV6_MTU_DISCOVER:
2184         case IPV6_MTU:
2185         case IPV6_V6ONLY:
2186         case IPV6_RECVPKTINFO:
2187         case IPV6_UNICAST_HOPS:
2188         case IPV6_MULTICAST_HOPS:
2189         case IPV6_MULTICAST_LOOP:
2190         case IPV6_RECVERR:
2191         case IPV6_RECVHOPLIMIT:
2192         case IPV6_2292HOPLIMIT:
2193         case IPV6_CHECKSUM:
2194         case IPV6_ADDRFORM:
2195         case IPV6_2292PKTINFO:
2196         case IPV6_RECVTCLASS:
2197         case IPV6_RECVRTHDR:
2198         case IPV6_2292RTHDR:
2199         case IPV6_RECVHOPOPTS:
2200         case IPV6_2292HOPOPTS:
2201         case IPV6_RECVDSTOPTS:
2202         case IPV6_2292DSTOPTS:
2203         case IPV6_TCLASS:
2204         case IPV6_ADDR_PREFERENCES:
2205 #ifdef IPV6_RECVPATHMTU
2206         case IPV6_RECVPATHMTU:
2207 #endif
2208 #ifdef IPV6_TRANSPARENT
2209         case IPV6_TRANSPARENT:
2210 #endif
2211 #ifdef IPV6_FREEBIND
2212         case IPV6_FREEBIND:
2213 #endif
2214 #ifdef IPV6_RECVORIGDSTADDR
2215         case IPV6_RECVORIGDSTADDR:
2216 #endif
2217             val = 0;
2218             if (optlen < sizeof(uint32_t)) {
2219                 return -TARGET_EINVAL;
2220             }
2221             if (get_user_u32(val, optval_addr)) {
2222                 return -TARGET_EFAULT;
2223             }
2224             ret = get_errno(setsockopt(sockfd, level, optname,
2225                                        &val, sizeof(val)));
2226             break;
2227         case IPV6_PKTINFO:
2228         {
2229             struct in6_pktinfo pki;
2230 
2231             if (optlen < sizeof(pki)) {
2232                 return -TARGET_EINVAL;
2233             }
2234 
2235             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2236                 return -TARGET_EFAULT;
2237             }
2238 
2239             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2240 
2241             ret = get_errno(setsockopt(sockfd, level, optname,
2242                                        &pki, sizeof(pki)));
2243             break;
2244         }
2245         case IPV6_ADD_MEMBERSHIP:
2246         case IPV6_DROP_MEMBERSHIP:
2247         {
2248             struct ipv6_mreq ipv6mreq;
2249 
2250             if (optlen < sizeof(ipv6mreq)) {
2251                 return -TARGET_EINVAL;
2252             }
2253 
2254             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2255                 return -TARGET_EFAULT;
2256             }
2257 
2258             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2259 
2260             ret = get_errno(setsockopt(sockfd, level, optname,
2261                                        &ipv6mreq, sizeof(ipv6mreq)));
2262             break;
2263         }
2264         default:
2265             goto unimplemented;
2266         }
2267         break;
2268     case SOL_ICMPV6:
2269         switch (optname) {
2270         case ICMPV6_FILTER:
2271         {
2272             struct icmp6_filter icmp6f;
2273 
2274             if (optlen > sizeof(icmp6f)) {
2275                 optlen = sizeof(icmp6f);
2276             }
2277 
2278             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2279                 return -TARGET_EFAULT;
2280             }
2281 
2282             for (val = 0; val < 8; val++) {
2283                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2284             }
2285 
2286             ret = get_errno(setsockopt(sockfd, level, optname,
2287                                        &icmp6f, optlen));
2288             break;
2289         }
2290         default:
2291             goto unimplemented;
2292         }
2293         break;
2294     case SOL_RAW:
2295         switch (optname) {
2296         case ICMP_FILTER:
2297         case IPV6_CHECKSUM:
2298             /* those take an u32 value */
2299             if (optlen < sizeof(uint32_t)) {
2300                 return -TARGET_EINVAL;
2301             }
2302 
2303             if (get_user_u32(val, optval_addr)) {
2304                 return -TARGET_EFAULT;
2305             }
2306             ret = get_errno(setsockopt(sockfd, level, optname,
2307                                        &val, sizeof(val)));
2308             break;
2309 
2310         default:
2311             goto unimplemented;
2312         }
2313         break;
2314 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2315     case SOL_ALG:
2316         switch (optname) {
2317         case ALG_SET_KEY:
2318         {
2319             char *alg_key = g_malloc(optlen);
2320 
2321             if (!alg_key) {
2322                 return -TARGET_ENOMEM;
2323             }
2324             if (copy_from_user(alg_key, optval_addr, optlen)) {
2325                 g_free(alg_key);
2326                 return -TARGET_EFAULT;
2327             }
2328             ret = get_errno(setsockopt(sockfd, level, optname,
2329                                        alg_key, optlen));
2330             g_free(alg_key);
2331             break;
2332         }
2333         case ALG_SET_AEAD_AUTHSIZE:
2334         {
2335             ret = get_errno(setsockopt(sockfd, level, optname,
2336                                        NULL, optlen));
2337             break;
2338         }
2339         default:
2340             goto unimplemented;
2341         }
2342         break;
2343 #endif
2344     case TARGET_SOL_SOCKET:
2345         switch (optname) {
2346         case TARGET_SO_RCVTIMEO:
2347         {
2348                 struct timeval tv;
2349 
2350                 optname = SO_RCVTIMEO;
2351 
2352 set_timeout:
2353                 if (optlen != sizeof(struct target_timeval)) {
2354                     return -TARGET_EINVAL;
2355                 }
2356 
2357                 if (copy_from_user_timeval(&tv, optval_addr)) {
2358                     return -TARGET_EFAULT;
2359                 }
2360 
2361                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2362                                 &tv, sizeof(tv)));
2363                 return ret;
2364         }
2365         case TARGET_SO_SNDTIMEO:
2366                 optname = SO_SNDTIMEO;
2367                 goto set_timeout;
2368         case TARGET_SO_ATTACH_FILTER:
2369         {
2370                 struct target_sock_fprog *tfprog;
2371                 struct target_sock_filter *tfilter;
2372                 struct sock_fprog fprog;
2373                 struct sock_filter *filter;
2374                 int i;
2375 
2376                 if (optlen != sizeof(*tfprog)) {
2377                     return -TARGET_EINVAL;
2378                 }
2379                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2380                     return -TARGET_EFAULT;
2381                 }
2382                 if (!lock_user_struct(VERIFY_READ, tfilter,
2383                                       tswapal(tfprog->filter), 0)) {
2384                     unlock_user_struct(tfprog, optval_addr, 1);
2385                     return -TARGET_EFAULT;
2386                 }
2387 
2388                 fprog.len = tswap16(tfprog->len);
2389                 filter = g_try_new(struct sock_filter, fprog.len);
2390                 if (filter == NULL) {
2391                     unlock_user_struct(tfilter, tfprog->filter, 1);
2392                     unlock_user_struct(tfprog, optval_addr, 1);
2393                     return -TARGET_ENOMEM;
2394                 }
2395                 for (i = 0; i < fprog.len; i++) {
2396                     filter[i].code = tswap16(tfilter[i].code);
2397                     filter[i].jt = tfilter[i].jt;
2398                     filter[i].jf = tfilter[i].jf;
2399                     filter[i].k = tswap32(tfilter[i].k);
2400                 }
2401                 fprog.filter = filter;
2402 
2403                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2404                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2405                 g_free(filter);
2406 
2407                 unlock_user_struct(tfilter, tfprog->filter, 1);
2408                 unlock_user_struct(tfprog, optval_addr, 1);
2409                 return ret;
2410         }
2411 	case TARGET_SO_BINDTODEVICE:
2412 	{
2413 		char *dev_ifname, *addr_ifname;
2414 
2415 		if (optlen > IFNAMSIZ - 1) {
2416 		    optlen = IFNAMSIZ - 1;
2417 		}
2418 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2419 		if (!dev_ifname) {
2420 		    return -TARGET_EFAULT;
2421 		}
2422 		optname = SO_BINDTODEVICE;
2423 		addr_ifname = alloca(IFNAMSIZ);
2424 		memcpy(addr_ifname, dev_ifname, optlen);
2425 		addr_ifname[optlen] = 0;
2426 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2427                                            addr_ifname, optlen));
2428 		unlock_user (dev_ifname, optval_addr, 0);
2429 		return ret;
2430 	}
2431         case TARGET_SO_LINGER:
2432         {
2433                 struct linger lg;
2434                 struct target_linger *tlg;
2435 
2436                 if (optlen != sizeof(struct target_linger)) {
2437                     return -TARGET_EINVAL;
2438                 }
2439                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2440                     return -TARGET_EFAULT;
2441                 }
2442                 __get_user(lg.l_onoff, &tlg->l_onoff);
2443                 __get_user(lg.l_linger, &tlg->l_linger);
2444                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2445                                 &lg, sizeof(lg)));
2446                 unlock_user_struct(tlg, optval_addr, 0);
2447                 return ret;
2448         }
2449             /* Options with 'int' argument.  */
2450         case TARGET_SO_DEBUG:
2451 		optname = SO_DEBUG;
2452 		break;
2453         case TARGET_SO_REUSEADDR:
2454 		optname = SO_REUSEADDR;
2455 		break;
2456 #ifdef SO_REUSEPORT
2457         case TARGET_SO_REUSEPORT:
2458                 optname = SO_REUSEPORT;
2459                 break;
2460 #endif
2461         case TARGET_SO_TYPE:
2462 		optname = SO_TYPE;
2463 		break;
2464         case TARGET_SO_ERROR:
2465 		optname = SO_ERROR;
2466 		break;
2467         case TARGET_SO_DONTROUTE:
2468 		optname = SO_DONTROUTE;
2469 		break;
2470         case TARGET_SO_BROADCAST:
2471 		optname = SO_BROADCAST;
2472 		break;
2473         case TARGET_SO_SNDBUF:
2474 		optname = SO_SNDBUF;
2475 		break;
2476         case TARGET_SO_SNDBUFFORCE:
2477                 optname = SO_SNDBUFFORCE;
2478                 break;
2479         case TARGET_SO_RCVBUF:
2480 		optname = SO_RCVBUF;
2481 		break;
2482         case TARGET_SO_RCVBUFFORCE:
2483                 optname = SO_RCVBUFFORCE;
2484                 break;
2485         case TARGET_SO_KEEPALIVE:
2486 		optname = SO_KEEPALIVE;
2487 		break;
2488         case TARGET_SO_OOBINLINE:
2489 		optname = SO_OOBINLINE;
2490 		break;
2491         case TARGET_SO_NO_CHECK:
2492 		optname = SO_NO_CHECK;
2493 		break;
2494         case TARGET_SO_PRIORITY:
2495 		optname = SO_PRIORITY;
2496 		break;
2497 #ifdef SO_BSDCOMPAT
2498         case TARGET_SO_BSDCOMPAT:
2499 		optname = SO_BSDCOMPAT;
2500 		break;
2501 #endif
2502         case TARGET_SO_PASSCRED:
2503 		optname = SO_PASSCRED;
2504 		break;
2505         case TARGET_SO_PASSSEC:
2506                 optname = SO_PASSSEC;
2507                 break;
2508         case TARGET_SO_TIMESTAMP:
2509 		optname = SO_TIMESTAMP;
2510 		break;
2511         case TARGET_SO_RCVLOWAT:
2512 		optname = SO_RCVLOWAT;
2513 		break;
2514         default:
2515             goto unimplemented;
2516         }
2517 	if (optlen < sizeof(uint32_t))
2518             return -TARGET_EINVAL;
2519 
2520 	if (get_user_u32(val, optval_addr))
2521             return -TARGET_EFAULT;
2522 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2523         break;
2524 #ifdef SOL_NETLINK
2525     case SOL_NETLINK:
2526         switch (optname) {
2527         case NETLINK_PKTINFO:
2528         case NETLINK_ADD_MEMBERSHIP:
2529         case NETLINK_DROP_MEMBERSHIP:
2530         case NETLINK_BROADCAST_ERROR:
2531         case NETLINK_NO_ENOBUFS:
2532 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2533         case NETLINK_LISTEN_ALL_NSID:
2534         case NETLINK_CAP_ACK:
2535 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2536 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2537         case NETLINK_EXT_ACK:
2538 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2539 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2540         case NETLINK_GET_STRICT_CHK:
2541 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2542             break;
2543         default:
2544             goto unimplemented;
2545         }
2546         val = 0;
2547         if (optlen < sizeof(uint32_t)) {
2548             return -TARGET_EINVAL;
2549         }
2550         if (get_user_u32(val, optval_addr)) {
2551             return -TARGET_EFAULT;
2552         }
2553         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2554                                    sizeof(val)));
2555         break;
2556 #endif /* SOL_NETLINK */
2557     default:
2558     unimplemented:
2559         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2560                       level, optname);
2561         ret = -TARGET_ENOPROTOOPT;
2562     }
2563     return ret;
2564 }
2565 
2566 /* do_getsockopt() Must return target values and target errnos. */
2567 static abi_long do_getsockopt(int sockfd, int level, int optname,
2568                               abi_ulong optval_addr, abi_ulong optlen)
2569 {
2570     abi_long ret;
2571     int len, val;
2572     socklen_t lv;
2573 
2574     switch(level) {
2575     case TARGET_SOL_SOCKET:
2576         level = SOL_SOCKET;
2577         switch (optname) {
2578         /* These don't just return a single integer */
2579         case TARGET_SO_PEERNAME:
2580             goto unimplemented;
2581         case TARGET_SO_RCVTIMEO: {
2582             struct timeval tv;
2583             socklen_t tvlen;
2584 
2585             optname = SO_RCVTIMEO;
2586 
2587 get_timeout:
2588             if (get_user_u32(len, optlen)) {
2589                 return -TARGET_EFAULT;
2590             }
2591             if (len < 0) {
2592                 return -TARGET_EINVAL;
2593             }
2594 
2595             tvlen = sizeof(tv);
2596             ret = get_errno(getsockopt(sockfd, level, optname,
2597                                        &tv, &tvlen));
2598             if (ret < 0) {
2599                 return ret;
2600             }
2601             if (len > sizeof(struct target_timeval)) {
2602                 len = sizeof(struct target_timeval);
2603             }
2604             if (copy_to_user_timeval(optval_addr, &tv)) {
2605                 return -TARGET_EFAULT;
2606             }
2607             if (put_user_u32(len, optlen)) {
2608                 return -TARGET_EFAULT;
2609             }
2610             break;
2611         }
2612         case TARGET_SO_SNDTIMEO:
2613             optname = SO_SNDTIMEO;
2614             goto get_timeout;
2615         case TARGET_SO_PEERCRED: {
2616             struct ucred cr;
2617             socklen_t crlen;
2618             struct target_ucred *tcr;
2619 
2620             if (get_user_u32(len, optlen)) {
2621                 return -TARGET_EFAULT;
2622             }
2623             if (len < 0) {
2624                 return -TARGET_EINVAL;
2625             }
2626 
2627             crlen = sizeof(cr);
2628             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2629                                        &cr, &crlen));
2630             if (ret < 0) {
2631                 return ret;
2632             }
2633             if (len > crlen) {
2634                 len = crlen;
2635             }
2636             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2637                 return -TARGET_EFAULT;
2638             }
2639             __put_user(cr.pid, &tcr->pid);
2640             __put_user(cr.uid, &tcr->uid);
2641             __put_user(cr.gid, &tcr->gid);
2642             unlock_user_struct(tcr, optval_addr, 1);
2643             if (put_user_u32(len, optlen)) {
2644                 return -TARGET_EFAULT;
2645             }
2646             break;
2647         }
2648         case TARGET_SO_PEERSEC: {
2649             char *name;
2650 
2651             if (get_user_u32(len, optlen)) {
2652                 return -TARGET_EFAULT;
2653             }
2654             if (len < 0) {
2655                 return -TARGET_EINVAL;
2656             }
2657             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2658             if (!name) {
2659                 return -TARGET_EFAULT;
2660             }
2661             lv = len;
2662             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2663                                        name, &lv));
2664             if (put_user_u32(lv, optlen)) {
2665                 ret = -TARGET_EFAULT;
2666             }
2667             unlock_user(name, optval_addr, lv);
2668             break;
2669         }
2670         case TARGET_SO_LINGER:
2671         {
2672             struct linger lg;
2673             socklen_t lglen;
2674             struct target_linger *tlg;
2675 
2676             if (get_user_u32(len, optlen)) {
2677                 return -TARGET_EFAULT;
2678             }
2679             if (len < 0) {
2680                 return -TARGET_EINVAL;
2681             }
2682 
2683             lglen = sizeof(lg);
2684             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2685                                        &lg, &lglen));
2686             if (ret < 0) {
2687                 return ret;
2688             }
2689             if (len > lglen) {
2690                 len = lglen;
2691             }
2692             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2693                 return -TARGET_EFAULT;
2694             }
2695             __put_user(lg.l_onoff, &tlg->l_onoff);
2696             __put_user(lg.l_linger, &tlg->l_linger);
2697             unlock_user_struct(tlg, optval_addr, 1);
2698             if (put_user_u32(len, optlen)) {
2699                 return -TARGET_EFAULT;
2700             }
2701             break;
2702         }
2703         /* Options with 'int' argument.  */
2704         case TARGET_SO_DEBUG:
2705             optname = SO_DEBUG;
2706             goto int_case;
2707         case TARGET_SO_REUSEADDR:
2708             optname = SO_REUSEADDR;
2709             goto int_case;
2710 #ifdef SO_REUSEPORT
2711         case TARGET_SO_REUSEPORT:
2712             optname = SO_REUSEPORT;
2713             goto int_case;
2714 #endif
2715         case TARGET_SO_TYPE:
2716             optname = SO_TYPE;
2717             goto int_case;
2718         case TARGET_SO_ERROR:
2719             optname = SO_ERROR;
2720             goto int_case;
2721         case TARGET_SO_DONTROUTE:
2722             optname = SO_DONTROUTE;
2723             goto int_case;
2724         case TARGET_SO_BROADCAST:
2725             optname = SO_BROADCAST;
2726             goto int_case;
2727         case TARGET_SO_SNDBUF:
2728             optname = SO_SNDBUF;
2729             goto int_case;
2730         case TARGET_SO_RCVBUF:
2731             optname = SO_RCVBUF;
2732             goto int_case;
2733         case TARGET_SO_KEEPALIVE:
2734             optname = SO_KEEPALIVE;
2735             goto int_case;
2736         case TARGET_SO_OOBINLINE:
2737             optname = SO_OOBINLINE;
2738             goto int_case;
2739         case TARGET_SO_NO_CHECK:
2740             optname = SO_NO_CHECK;
2741             goto int_case;
2742         case TARGET_SO_PRIORITY:
2743             optname = SO_PRIORITY;
2744             goto int_case;
2745 #ifdef SO_BSDCOMPAT
2746         case TARGET_SO_BSDCOMPAT:
2747             optname = SO_BSDCOMPAT;
2748             goto int_case;
2749 #endif
2750         case TARGET_SO_PASSCRED:
2751             optname = SO_PASSCRED;
2752             goto int_case;
2753         case TARGET_SO_TIMESTAMP:
2754             optname = SO_TIMESTAMP;
2755             goto int_case;
2756         case TARGET_SO_RCVLOWAT:
2757             optname = SO_RCVLOWAT;
2758             goto int_case;
2759         case TARGET_SO_ACCEPTCONN:
2760             optname = SO_ACCEPTCONN;
2761             goto int_case;
2762         case TARGET_SO_PROTOCOL:
2763             optname = SO_PROTOCOL;
2764             goto int_case;
2765         case TARGET_SO_DOMAIN:
2766             optname = SO_DOMAIN;
2767             goto int_case;
2768         default:
2769             goto int_case;
2770         }
2771         break;
2772     case SOL_TCP:
2773     case SOL_UDP:
2774         /* TCP and UDP options all take an 'int' value.  */
2775     int_case:
2776         if (get_user_u32(len, optlen))
2777             return -TARGET_EFAULT;
2778         if (len < 0)
2779             return -TARGET_EINVAL;
2780         lv = sizeof(lv);
2781         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2782         if (ret < 0)
2783             return ret;
2784         if (optname == SO_TYPE) {
2785             val = host_to_target_sock_type(val);
2786         }
2787         if (len > lv)
2788             len = lv;
2789         if (len == 4) {
2790             if (put_user_u32(val, optval_addr))
2791                 return -TARGET_EFAULT;
2792         } else {
2793             if (put_user_u8(val, optval_addr))
2794                 return -TARGET_EFAULT;
2795         }
2796         if (put_user_u32(len, optlen))
2797             return -TARGET_EFAULT;
2798         break;
2799     case SOL_IP:
2800         switch(optname) {
2801         case IP_TOS:
2802         case IP_TTL:
2803         case IP_HDRINCL:
2804         case IP_ROUTER_ALERT:
2805         case IP_RECVOPTS:
2806         case IP_RETOPTS:
2807         case IP_PKTINFO:
2808         case IP_MTU_DISCOVER:
2809         case IP_RECVERR:
2810         case IP_RECVTOS:
2811 #ifdef IP_FREEBIND
2812         case IP_FREEBIND:
2813 #endif
2814         case IP_MULTICAST_TTL:
2815         case IP_MULTICAST_LOOP:
2816             if (get_user_u32(len, optlen))
2817                 return -TARGET_EFAULT;
2818             if (len < 0)
2819                 return -TARGET_EINVAL;
2820             lv = sizeof(lv);
2821             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2822             if (ret < 0)
2823                 return ret;
2824             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2825                 len = 1;
2826                 if (put_user_u32(len, optlen)
2827                     || put_user_u8(val, optval_addr))
2828                     return -TARGET_EFAULT;
2829             } else {
2830                 if (len > sizeof(int))
2831                     len = sizeof(int);
2832                 if (put_user_u32(len, optlen)
2833                     || put_user_u32(val, optval_addr))
2834                     return -TARGET_EFAULT;
2835             }
2836             break;
2837         default:
2838             ret = -TARGET_ENOPROTOOPT;
2839             break;
2840         }
2841         break;
2842     case SOL_IPV6:
2843         switch (optname) {
2844         case IPV6_MTU_DISCOVER:
2845         case IPV6_MTU:
2846         case IPV6_V6ONLY:
2847         case IPV6_RECVPKTINFO:
2848         case IPV6_UNICAST_HOPS:
2849         case IPV6_MULTICAST_HOPS:
2850         case IPV6_MULTICAST_LOOP:
2851         case IPV6_RECVERR:
2852         case IPV6_RECVHOPLIMIT:
2853         case IPV6_2292HOPLIMIT:
2854         case IPV6_CHECKSUM:
2855         case IPV6_ADDRFORM:
2856         case IPV6_2292PKTINFO:
2857         case IPV6_RECVTCLASS:
2858         case IPV6_RECVRTHDR:
2859         case IPV6_2292RTHDR:
2860         case IPV6_RECVHOPOPTS:
2861         case IPV6_2292HOPOPTS:
2862         case IPV6_RECVDSTOPTS:
2863         case IPV6_2292DSTOPTS:
2864         case IPV6_TCLASS:
2865         case IPV6_ADDR_PREFERENCES:
2866 #ifdef IPV6_RECVPATHMTU
2867         case IPV6_RECVPATHMTU:
2868 #endif
2869 #ifdef IPV6_TRANSPARENT
2870         case IPV6_TRANSPARENT:
2871 #endif
2872 #ifdef IPV6_FREEBIND
2873         case IPV6_FREEBIND:
2874 #endif
2875 #ifdef IPV6_RECVORIGDSTADDR
2876         case IPV6_RECVORIGDSTADDR:
2877 #endif
2878             if (get_user_u32(len, optlen))
2879                 return -TARGET_EFAULT;
2880             if (len < 0)
2881                 return -TARGET_EINVAL;
2882             lv = sizeof(lv);
2883             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2884             if (ret < 0)
2885                 return ret;
2886             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2887                 len = 1;
2888                 if (put_user_u32(len, optlen)
2889                     || put_user_u8(val, optval_addr))
2890                     return -TARGET_EFAULT;
2891             } else {
2892                 if (len > sizeof(int))
2893                     len = sizeof(int);
2894                 if (put_user_u32(len, optlen)
2895                     || put_user_u32(val, optval_addr))
2896                     return -TARGET_EFAULT;
2897             }
2898             break;
2899         default:
2900             ret = -TARGET_ENOPROTOOPT;
2901             break;
2902         }
2903         break;
2904 #ifdef SOL_NETLINK
2905     case SOL_NETLINK:
2906         switch (optname) {
2907         case NETLINK_PKTINFO:
2908         case NETLINK_BROADCAST_ERROR:
2909         case NETLINK_NO_ENOBUFS:
2910 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2911         case NETLINK_LISTEN_ALL_NSID:
2912         case NETLINK_CAP_ACK:
2913 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2914 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2915         case NETLINK_EXT_ACK:
2916 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2917 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2918         case NETLINK_GET_STRICT_CHK:
2919 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2920             if (get_user_u32(len, optlen)) {
2921                 return -TARGET_EFAULT;
2922             }
2923             if (len != sizeof(val)) {
2924                 return -TARGET_EINVAL;
2925             }
2926             lv = len;
2927             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2928             if (ret < 0) {
2929                 return ret;
2930             }
2931             if (put_user_u32(lv, optlen)
2932                 || put_user_u32(val, optval_addr)) {
2933                 return -TARGET_EFAULT;
2934             }
2935             break;
2936 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2937         case NETLINK_LIST_MEMBERSHIPS:
2938         {
2939             uint32_t *results;
2940             int i;
2941             if (get_user_u32(len, optlen)) {
2942                 return -TARGET_EFAULT;
2943             }
2944             if (len < 0) {
2945                 return -TARGET_EINVAL;
2946             }
2947             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2948             if (!results && len > 0) {
2949                 return -TARGET_EFAULT;
2950             }
2951             lv = len;
2952             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2953             if (ret < 0) {
2954                 unlock_user(results, optval_addr, 0);
2955                 return ret;
2956             }
2957             /* swap host endianess to target endianess. */
2958             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2959                 results[i] = tswap32(results[i]);
2960             }
2961             if (put_user_u32(lv, optlen)) {
2962                 return -TARGET_EFAULT;
2963             }
2964             unlock_user(results, optval_addr, 0);
2965             break;
2966         }
2967 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2968         default:
2969             goto unimplemented;
2970         }
2971         break;
2972 #endif /* SOL_NETLINK */
2973     default:
2974     unimplemented:
2975         qemu_log_mask(LOG_UNIMP,
2976                       "getsockopt level=%d optname=%d not yet supported\n",
2977                       level, optname);
2978         ret = -TARGET_EOPNOTSUPP;
2979         break;
2980     }
2981     return ret;
2982 }
2983 
2984 /* Convert target low/high pair representing file offset into the host
2985  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2986  * as the kernel doesn't handle them either.
2987  */
2988 static void target_to_host_low_high(abi_ulong tlow,
2989                                     abi_ulong thigh,
2990                                     unsigned long *hlow,
2991                                     unsigned long *hhigh)
2992 {
2993     uint64_t off = tlow |
2994         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2995         TARGET_LONG_BITS / 2;
2996 
2997     *hlow = off;
2998     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2999 }
3000 
3001 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3002                                 abi_ulong count, int copy)
3003 {
3004     struct target_iovec *target_vec;
3005     struct iovec *vec;
3006     abi_ulong total_len, max_len;
3007     int i;
3008     int err = 0;
3009     bool bad_address = false;
3010 
3011     if (count == 0) {
3012         errno = 0;
3013         return NULL;
3014     }
3015     if (count > IOV_MAX) {
3016         errno = EINVAL;
3017         return NULL;
3018     }
3019 
3020     vec = g_try_new0(struct iovec, count);
3021     if (vec == NULL) {
3022         errno = ENOMEM;
3023         return NULL;
3024     }
3025 
3026     target_vec = lock_user(VERIFY_READ, target_addr,
3027                            count * sizeof(struct target_iovec), 1);
3028     if (target_vec == NULL) {
3029         err = EFAULT;
3030         goto fail2;
3031     }
3032 
3033     /* ??? If host page size > target page size, this will result in a
3034        value larger than what we can actually support.  */
3035     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3036     total_len = 0;
3037 
3038     for (i = 0; i < count; i++) {
3039         abi_ulong base = tswapal(target_vec[i].iov_base);
3040         abi_long len = tswapal(target_vec[i].iov_len);
3041 
3042         if (len < 0) {
3043             err = EINVAL;
3044             goto fail;
3045         } else if (len == 0) {
3046             /* Zero length pointer is ignored.  */
3047             vec[i].iov_base = 0;
3048         } else {
3049             vec[i].iov_base = lock_user(type, base, len, copy);
3050             /* If the first buffer pointer is bad, this is a fault.  But
3051              * subsequent bad buffers will result in a partial write; this
3052              * is realized by filling the vector with null pointers and
3053              * zero lengths. */
3054             if (!vec[i].iov_base) {
3055                 if (i == 0) {
3056                     err = EFAULT;
3057                     goto fail;
3058                 } else {
3059                     bad_address = true;
3060                 }
3061             }
3062             if (bad_address) {
3063                 len = 0;
3064             }
3065             if (len > max_len - total_len) {
3066                 len = max_len - total_len;
3067             }
3068         }
3069         vec[i].iov_len = len;
3070         total_len += len;
3071     }
3072 
3073     unlock_user(target_vec, target_addr, 0);
3074     return vec;
3075 
3076  fail:
3077     while (--i >= 0) {
3078         if (tswapal(target_vec[i].iov_len) > 0) {
3079             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3080         }
3081     }
3082     unlock_user(target_vec, target_addr, 0);
3083  fail2:
3084     g_free(vec);
3085     errno = err;
3086     return NULL;
3087 }
3088 
3089 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3090                          abi_ulong count, int copy)
3091 {
3092     struct target_iovec *target_vec;
3093     int i;
3094 
3095     target_vec = lock_user(VERIFY_READ, target_addr,
3096                            count * sizeof(struct target_iovec), 1);
3097     if (target_vec) {
3098         for (i = 0; i < count; i++) {
3099             abi_ulong base = tswapal(target_vec[i].iov_base);
3100             abi_long len = tswapal(target_vec[i].iov_len);
3101             if (len < 0) {
3102                 break;
3103             }
3104             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3105         }
3106         unlock_user(target_vec, target_addr, 0);
3107     }
3108 
3109     g_free(vec);
3110 }
3111 
3112 static inline int target_to_host_sock_type(int *type)
3113 {
3114     int host_type = 0;
3115     int target_type = *type;
3116 
3117     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3118     case TARGET_SOCK_DGRAM:
3119         host_type = SOCK_DGRAM;
3120         break;
3121     case TARGET_SOCK_STREAM:
3122         host_type = SOCK_STREAM;
3123         break;
3124     default:
3125         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3126         break;
3127     }
3128     if (target_type & TARGET_SOCK_CLOEXEC) {
3129 #if defined(SOCK_CLOEXEC)
3130         host_type |= SOCK_CLOEXEC;
3131 #else
3132         return -TARGET_EINVAL;
3133 #endif
3134     }
3135     if (target_type & TARGET_SOCK_NONBLOCK) {
3136 #if defined(SOCK_NONBLOCK)
3137         host_type |= SOCK_NONBLOCK;
3138 #elif !defined(O_NONBLOCK)
3139         return -TARGET_EINVAL;
3140 #endif
3141     }
3142     *type = host_type;
3143     return 0;
3144 }
3145 
3146 /* Try to emulate socket type flags after socket creation.  */
3147 static int sock_flags_fixup(int fd, int target_type)
3148 {
3149 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3150     if (target_type & TARGET_SOCK_NONBLOCK) {
3151         int flags = fcntl(fd, F_GETFL);
3152         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3153             close(fd);
3154             return -TARGET_EINVAL;
3155         }
3156     }
3157 #endif
3158     return fd;
3159 }
3160 
3161 /* do_socket() Must return target values and target errnos. */
3162 static abi_long do_socket(int domain, int type, int protocol)
3163 {
3164     int target_type = type;
3165     int ret;
3166 
3167     ret = target_to_host_sock_type(&type);
3168     if (ret) {
3169         return ret;
3170     }
3171 
3172     if (domain == PF_NETLINK && !(
3173 #ifdef CONFIG_RTNETLINK
3174          protocol == NETLINK_ROUTE ||
3175 #endif
3176          protocol == NETLINK_KOBJECT_UEVENT ||
3177          protocol == NETLINK_AUDIT)) {
3178         return -TARGET_EPROTONOSUPPORT;
3179     }
3180 
3181     if (domain == AF_PACKET ||
3182         (domain == AF_INET && type == SOCK_PACKET)) {
3183         protocol = tswap16(protocol);
3184     }
3185 
3186     ret = get_errno(socket(domain, type, protocol));
3187     if (ret >= 0) {
3188         ret = sock_flags_fixup(ret, target_type);
3189         if (type == SOCK_PACKET) {
3190             /* Manage an obsolete case :
3191              * if socket type is SOCK_PACKET, bind by name
3192              */
3193             fd_trans_register(ret, &target_packet_trans);
3194         } else if (domain == PF_NETLINK) {
3195             switch (protocol) {
3196 #ifdef CONFIG_RTNETLINK
3197             case NETLINK_ROUTE:
3198                 fd_trans_register(ret, &target_netlink_route_trans);
3199                 break;
3200 #endif
3201             case NETLINK_KOBJECT_UEVENT:
3202                 /* nothing to do: messages are strings */
3203                 break;
3204             case NETLINK_AUDIT:
3205                 fd_trans_register(ret, &target_netlink_audit_trans);
3206                 break;
3207             default:
3208                 g_assert_not_reached();
3209             }
3210         }
3211     }
3212     return ret;
3213 }
3214 
3215 /* do_bind() Must return target values and target errnos. */
3216 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3217                         socklen_t addrlen)
3218 {
3219     void *addr;
3220     abi_long ret;
3221 
3222     if ((int)addrlen < 0) {
3223         return -TARGET_EINVAL;
3224     }
3225 
3226     addr = alloca(addrlen+1);
3227 
3228     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3229     if (ret)
3230         return ret;
3231 
3232     return get_errno(bind(sockfd, addr, addrlen));
3233 }
3234 
3235 /* do_connect() Must return target values and target errnos. */
3236 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3237                            socklen_t addrlen)
3238 {
3239     void *addr;
3240     abi_long ret;
3241 
3242     if ((int)addrlen < 0) {
3243         return -TARGET_EINVAL;
3244     }
3245 
3246     addr = alloca(addrlen+1);
3247 
3248     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3249     if (ret)
3250         return ret;
3251 
3252     return get_errno(safe_connect(sockfd, addr, addrlen));
3253 }
3254 
3255 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3256 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3257                                       int flags, int send)
3258 {
3259     abi_long ret, len;
3260     struct msghdr msg;
3261     abi_ulong count;
3262     struct iovec *vec;
3263     abi_ulong target_vec;
3264 
3265     if (msgp->msg_name) {
3266         msg.msg_namelen = tswap32(msgp->msg_namelen);
3267         msg.msg_name = alloca(msg.msg_namelen+1);
3268         ret = target_to_host_sockaddr(fd, msg.msg_name,
3269                                       tswapal(msgp->msg_name),
3270                                       msg.msg_namelen);
3271         if (ret == -TARGET_EFAULT) {
3272             /* For connected sockets msg_name and msg_namelen must
3273              * be ignored, so returning EFAULT immediately is wrong.
3274              * Instead, pass a bad msg_name to the host kernel, and
3275              * let it decide whether to return EFAULT or not.
3276              */
3277             msg.msg_name = (void *)-1;
3278         } else if (ret) {
3279             goto out2;
3280         }
3281     } else {
3282         msg.msg_name = NULL;
3283         msg.msg_namelen = 0;
3284     }
3285     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3286     msg.msg_control = alloca(msg.msg_controllen);
3287     memset(msg.msg_control, 0, msg.msg_controllen);
3288 
3289     msg.msg_flags = tswap32(msgp->msg_flags);
3290 
3291     count = tswapal(msgp->msg_iovlen);
3292     target_vec = tswapal(msgp->msg_iov);
3293 
3294     if (count > IOV_MAX) {
3295         /* sendrcvmsg returns a different errno for this condition than
3296          * readv/writev, so we must catch it here before lock_iovec() does.
3297          */
3298         ret = -TARGET_EMSGSIZE;
3299         goto out2;
3300     }
3301 
3302     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3303                      target_vec, count, send);
3304     if (vec == NULL) {
3305         ret = -host_to_target_errno(errno);
3306         goto out2;
3307     }
3308     msg.msg_iovlen = count;
3309     msg.msg_iov = vec;
3310 
3311     if (send) {
3312         if (fd_trans_target_to_host_data(fd)) {
3313             void *host_msg;
3314 
3315             host_msg = g_malloc(msg.msg_iov->iov_len);
3316             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3317             ret = fd_trans_target_to_host_data(fd)(host_msg,
3318                                                    msg.msg_iov->iov_len);
3319             if (ret >= 0) {
3320                 msg.msg_iov->iov_base = host_msg;
3321                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3322             }
3323             g_free(host_msg);
3324         } else {
3325             ret = target_to_host_cmsg(&msg, msgp);
3326             if (ret == 0) {
3327                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3328             }
3329         }
3330     } else {
3331         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3332         if (!is_error(ret)) {
3333             len = ret;
3334             if (fd_trans_host_to_target_data(fd)) {
3335                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3336                                                MIN(msg.msg_iov->iov_len, len));
3337             } else {
3338                 ret = host_to_target_cmsg(msgp, &msg);
3339             }
3340             if (!is_error(ret)) {
3341                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3342                 msgp->msg_flags = tswap32(msg.msg_flags);
3343                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3344                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3345                                     msg.msg_name, msg.msg_namelen);
3346                     if (ret) {
3347                         goto out;
3348                     }
3349                 }
3350 
3351                 ret = len;
3352             }
3353         }
3354     }
3355 
3356 out:
3357     unlock_iovec(vec, target_vec, count, !send);
3358 out2:
3359     return ret;
3360 }
3361 
3362 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3363                                int flags, int send)
3364 {
3365     abi_long ret;
3366     struct target_msghdr *msgp;
3367 
3368     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3369                           msgp,
3370                           target_msg,
3371                           send ? 1 : 0)) {
3372         return -TARGET_EFAULT;
3373     }
3374     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3375     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3376     return ret;
3377 }
3378 
3379 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3380  * so it might not have this *mmsg-specific flag either.
3381  */
3382 #ifndef MSG_WAITFORONE
3383 #define MSG_WAITFORONE 0x10000
3384 #endif
3385 
3386 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3387                                 unsigned int vlen, unsigned int flags,
3388                                 int send)
3389 {
3390     struct target_mmsghdr *mmsgp;
3391     abi_long ret = 0;
3392     int i;
3393 
3394     if (vlen > UIO_MAXIOV) {
3395         vlen = UIO_MAXIOV;
3396     }
3397 
3398     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3399     if (!mmsgp) {
3400         return -TARGET_EFAULT;
3401     }
3402 
3403     for (i = 0; i < vlen; i++) {
3404         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3405         if (is_error(ret)) {
3406             break;
3407         }
3408         mmsgp[i].msg_len = tswap32(ret);
3409         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3410         if (flags & MSG_WAITFORONE) {
3411             flags |= MSG_DONTWAIT;
3412         }
3413     }
3414 
3415     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3416 
3417     /* Return number of datagrams sent if we sent any at all;
3418      * otherwise return the error.
3419      */
3420     if (i) {
3421         return i;
3422     }
3423     return ret;
3424 }
3425 
3426 /* do_accept4() Must return target values and target errnos. */
3427 static abi_long do_accept4(int fd, abi_ulong target_addr,
3428                            abi_ulong target_addrlen_addr, int flags)
3429 {
3430     socklen_t addrlen, ret_addrlen;
3431     void *addr;
3432     abi_long ret;
3433     int host_flags;
3434 
3435     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3436 
3437     if (target_addr == 0) {
3438         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3439     }
3440 
3441     /* linux returns EFAULT if addrlen pointer is invalid */
3442     if (get_user_u32(addrlen, target_addrlen_addr))
3443         return -TARGET_EFAULT;
3444 
3445     if ((int)addrlen < 0) {
3446         return -TARGET_EINVAL;
3447     }
3448 
3449     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3450         return -TARGET_EFAULT;
3451     }
3452 
3453     addr = alloca(addrlen);
3454 
3455     ret_addrlen = addrlen;
3456     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3457     if (!is_error(ret)) {
3458         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3459         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3460             ret = -TARGET_EFAULT;
3461         }
3462     }
3463     return ret;
3464 }
3465 
3466 /* do_getpeername() Must return target values and target errnos. */
3467 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3468                                abi_ulong target_addrlen_addr)
3469 {
3470     socklen_t addrlen, ret_addrlen;
3471     void *addr;
3472     abi_long ret;
3473 
3474     if (get_user_u32(addrlen, target_addrlen_addr))
3475         return -TARGET_EFAULT;
3476 
3477     if ((int)addrlen < 0) {
3478         return -TARGET_EINVAL;
3479     }
3480 
3481     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3482         return -TARGET_EFAULT;
3483     }
3484 
3485     addr = alloca(addrlen);
3486 
3487     ret_addrlen = addrlen;
3488     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3489     if (!is_error(ret)) {
3490         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3491         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3492             ret = -TARGET_EFAULT;
3493         }
3494     }
3495     return ret;
3496 }
3497 
3498 /* do_getsockname() Must return target values and target errnos. */
3499 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3500                                abi_ulong target_addrlen_addr)
3501 {
3502     socklen_t addrlen, ret_addrlen;
3503     void *addr;
3504     abi_long ret;
3505 
3506     if (get_user_u32(addrlen, target_addrlen_addr))
3507         return -TARGET_EFAULT;
3508 
3509     if ((int)addrlen < 0) {
3510         return -TARGET_EINVAL;
3511     }
3512 
3513     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3514         return -TARGET_EFAULT;
3515     }
3516 
3517     addr = alloca(addrlen);
3518 
3519     ret_addrlen = addrlen;
3520     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3521     if (!is_error(ret)) {
3522         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3523         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3524             ret = -TARGET_EFAULT;
3525         }
3526     }
3527     return ret;
3528 }
3529 
3530 /* do_socketpair() Must return target values and target errnos. */
3531 static abi_long do_socketpair(int domain, int type, int protocol,
3532                               abi_ulong target_tab_addr)
3533 {
3534     int tab[2];
3535     abi_long ret;
3536 
3537     target_to_host_sock_type(&type);
3538 
3539     ret = get_errno(socketpair(domain, type, protocol, tab));
3540     if (!is_error(ret)) {
3541         if (put_user_s32(tab[0], target_tab_addr)
3542             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3543             ret = -TARGET_EFAULT;
3544     }
3545     return ret;
3546 }
3547 
3548 /* do_sendto() Must return target values and target errnos. */
3549 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3550                           abi_ulong target_addr, socklen_t addrlen)
3551 {
3552     void *addr;
3553     void *host_msg;
3554     void *copy_msg = NULL;
3555     abi_long ret;
3556 
3557     if ((int)addrlen < 0) {
3558         return -TARGET_EINVAL;
3559     }
3560 
3561     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3562     if (!host_msg)
3563         return -TARGET_EFAULT;
3564     if (fd_trans_target_to_host_data(fd)) {
3565         copy_msg = host_msg;
3566         host_msg = g_malloc(len);
3567         memcpy(host_msg, copy_msg, len);
3568         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3569         if (ret < 0) {
3570             goto fail;
3571         }
3572     }
3573     if (target_addr) {
3574         addr = alloca(addrlen+1);
3575         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3576         if (ret) {
3577             goto fail;
3578         }
3579         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3580     } else {
3581         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3582     }
3583 fail:
3584     if (copy_msg) {
3585         g_free(host_msg);
3586         host_msg = copy_msg;
3587     }
3588     unlock_user(host_msg, msg, 0);
3589     return ret;
3590 }
3591 
3592 /* do_recvfrom() Must return target values and target errnos. */
3593 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3594                             abi_ulong target_addr,
3595                             abi_ulong target_addrlen)
3596 {
3597     socklen_t addrlen, ret_addrlen;
3598     void *addr;
3599     void *host_msg;
3600     abi_long ret;
3601 
3602     if (!msg) {
3603         host_msg = NULL;
3604     } else {
3605         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3606         if (!host_msg) {
3607             return -TARGET_EFAULT;
3608         }
3609     }
3610     if (target_addr) {
3611         if (get_user_u32(addrlen, target_addrlen)) {
3612             ret = -TARGET_EFAULT;
3613             goto fail;
3614         }
3615         if ((int)addrlen < 0) {
3616             ret = -TARGET_EINVAL;
3617             goto fail;
3618         }
3619         addr = alloca(addrlen);
3620         ret_addrlen = addrlen;
3621         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3622                                       addr, &ret_addrlen));
3623     } else {
3624         addr = NULL; /* To keep compiler quiet.  */
3625         addrlen = 0; /* To keep compiler quiet.  */
3626         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3627     }
3628     if (!is_error(ret)) {
3629         if (fd_trans_host_to_target_data(fd)) {
3630             abi_long trans;
3631             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3632             if (is_error(trans)) {
3633                 ret = trans;
3634                 goto fail;
3635             }
3636         }
3637         if (target_addr) {
3638             host_to_target_sockaddr(target_addr, addr,
3639                                     MIN(addrlen, ret_addrlen));
3640             if (put_user_u32(ret_addrlen, target_addrlen)) {
3641                 ret = -TARGET_EFAULT;
3642                 goto fail;
3643             }
3644         }
3645         unlock_user(host_msg, msg, len);
3646     } else {
3647 fail:
3648         unlock_user(host_msg, msg, 0);
3649     }
3650     return ret;
3651 }
3652 
3653 #ifdef TARGET_NR_socketcall
3654 /* do_socketcall() must return target values and target errnos. */
3655 static abi_long do_socketcall(int num, abi_ulong vptr)
3656 {
3657     static const unsigned nargs[] = { /* number of arguments per operation */
3658         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3659         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3660         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3661         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3662         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3663         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3664         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3665         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3666         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3667         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3668         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3669         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3670         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3671         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3672         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3673         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3674         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3675         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3676         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3677         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3678     };
3679     abi_long a[6]; /* max 6 args */
3680     unsigned i;
3681 
3682     /* check the range of the first argument num */
3683     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3684     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3685         return -TARGET_EINVAL;
3686     }
3687     /* ensure we have space for args */
3688     if (nargs[num] > ARRAY_SIZE(a)) {
3689         return -TARGET_EINVAL;
3690     }
3691     /* collect the arguments in a[] according to nargs[] */
3692     for (i = 0; i < nargs[num]; ++i) {
3693         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3694             return -TARGET_EFAULT;
3695         }
3696     }
3697     /* now when we have the args, invoke the appropriate underlying function */
3698     switch (num) {
3699     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3700         return do_socket(a[0], a[1], a[2]);
3701     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3702         return do_bind(a[0], a[1], a[2]);
3703     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3704         return do_connect(a[0], a[1], a[2]);
3705     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3706         return get_errno(listen(a[0], a[1]));
3707     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3708         return do_accept4(a[0], a[1], a[2], 0);
3709     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3710         return do_getsockname(a[0], a[1], a[2]);
3711     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3712         return do_getpeername(a[0], a[1], a[2]);
3713     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3714         return do_socketpair(a[0], a[1], a[2], a[3]);
3715     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3716         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3717     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3718         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3719     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3720         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3721     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3722         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3723     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3724         return get_errno(shutdown(a[0], a[1]));
3725     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3726         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3727     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3728         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3729     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3730         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3731     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3732         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3733     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3734         return do_accept4(a[0], a[1], a[2], a[3]);
3735     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3736         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3737     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3738         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3739     default:
3740         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3741         return -TARGET_EINVAL;
3742     }
3743 }
3744 #endif
3745 
3746 #define N_SHM_REGIONS	32
3747 
3748 static struct shm_region {
3749     abi_ulong start;
3750     abi_ulong size;
3751     bool in_use;
3752 } shm_regions[N_SHM_REGIONS];
3753 
3754 #ifndef TARGET_SEMID64_DS
3755 /* asm-generic version of this struct */
3756 struct target_semid64_ds
3757 {
3758   struct target_ipc_perm sem_perm;
3759   abi_ulong sem_otime;
3760 #if TARGET_ABI_BITS == 32
3761   abi_ulong __unused1;
3762 #endif
3763   abi_ulong sem_ctime;
3764 #if TARGET_ABI_BITS == 32
3765   abi_ulong __unused2;
3766 #endif
3767   abi_ulong sem_nsems;
3768   abi_ulong __unused3;
3769   abi_ulong __unused4;
3770 };
3771 #endif
3772 
3773 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3774                                                abi_ulong target_addr)
3775 {
3776     struct target_ipc_perm *target_ip;
3777     struct target_semid64_ds *target_sd;
3778 
3779     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3780         return -TARGET_EFAULT;
3781     target_ip = &(target_sd->sem_perm);
3782     host_ip->__key = tswap32(target_ip->__key);
3783     host_ip->uid = tswap32(target_ip->uid);
3784     host_ip->gid = tswap32(target_ip->gid);
3785     host_ip->cuid = tswap32(target_ip->cuid);
3786     host_ip->cgid = tswap32(target_ip->cgid);
3787 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3788     host_ip->mode = tswap32(target_ip->mode);
3789 #else
3790     host_ip->mode = tswap16(target_ip->mode);
3791 #endif
3792 #if defined(TARGET_PPC)
3793     host_ip->__seq = tswap32(target_ip->__seq);
3794 #else
3795     host_ip->__seq = tswap16(target_ip->__seq);
3796 #endif
3797     unlock_user_struct(target_sd, target_addr, 0);
3798     return 0;
3799 }
3800 
3801 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3802                                                struct ipc_perm *host_ip)
3803 {
3804     struct target_ipc_perm *target_ip;
3805     struct target_semid64_ds *target_sd;
3806 
3807     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3808         return -TARGET_EFAULT;
3809     target_ip = &(target_sd->sem_perm);
3810     target_ip->__key = tswap32(host_ip->__key);
3811     target_ip->uid = tswap32(host_ip->uid);
3812     target_ip->gid = tswap32(host_ip->gid);
3813     target_ip->cuid = tswap32(host_ip->cuid);
3814     target_ip->cgid = tswap32(host_ip->cgid);
3815 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3816     target_ip->mode = tswap32(host_ip->mode);
3817 #else
3818     target_ip->mode = tswap16(host_ip->mode);
3819 #endif
3820 #if defined(TARGET_PPC)
3821     target_ip->__seq = tswap32(host_ip->__seq);
3822 #else
3823     target_ip->__seq = tswap16(host_ip->__seq);
3824 #endif
3825     unlock_user_struct(target_sd, target_addr, 1);
3826     return 0;
3827 }
3828 
3829 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3830                                                abi_ulong target_addr)
3831 {
3832     struct target_semid64_ds *target_sd;
3833 
3834     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3835         return -TARGET_EFAULT;
3836     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3837         return -TARGET_EFAULT;
3838     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3839     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3840     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3841     unlock_user_struct(target_sd, target_addr, 0);
3842     return 0;
3843 }
3844 
3845 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3846                                                struct semid_ds *host_sd)
3847 {
3848     struct target_semid64_ds *target_sd;
3849 
3850     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3851         return -TARGET_EFAULT;
3852     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3853         return -TARGET_EFAULT;
3854     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3855     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3856     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3857     unlock_user_struct(target_sd, target_addr, 1);
3858     return 0;
3859 }
3860 
3861 struct target_seminfo {
3862     int semmap;
3863     int semmni;
3864     int semmns;
3865     int semmnu;
3866     int semmsl;
3867     int semopm;
3868     int semume;
3869     int semusz;
3870     int semvmx;
3871     int semaem;
3872 };
3873 
3874 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3875                                               struct seminfo *host_seminfo)
3876 {
3877     struct target_seminfo *target_seminfo;
3878     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3879         return -TARGET_EFAULT;
3880     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3881     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3882     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3883     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3884     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3885     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3886     __put_user(host_seminfo->semume, &target_seminfo->semume);
3887     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3888     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3889     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3890     unlock_user_struct(target_seminfo, target_addr, 1);
3891     return 0;
3892 }
3893 
3894 union semun {
3895 	int val;
3896 	struct semid_ds *buf;
3897 	unsigned short *array;
3898 	struct seminfo *__buf;
3899 };
3900 
3901 union target_semun {
3902 	int val;
3903 	abi_ulong buf;
3904 	abi_ulong array;
3905 	abi_ulong __buf;
3906 };
3907 
3908 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3909                                                abi_ulong target_addr)
3910 {
3911     int nsems;
3912     unsigned short *array;
3913     union semun semun;
3914     struct semid_ds semid_ds;
3915     int i, ret;
3916 
3917     semun.buf = &semid_ds;
3918 
3919     ret = semctl(semid, 0, IPC_STAT, semun);
3920     if (ret == -1)
3921         return get_errno(ret);
3922 
3923     nsems = semid_ds.sem_nsems;
3924 
3925     *host_array = g_try_new(unsigned short, nsems);
3926     if (!*host_array) {
3927         return -TARGET_ENOMEM;
3928     }
3929     array = lock_user(VERIFY_READ, target_addr,
3930                       nsems*sizeof(unsigned short), 1);
3931     if (!array) {
3932         g_free(*host_array);
3933         return -TARGET_EFAULT;
3934     }
3935 
3936     for(i=0; i<nsems; i++) {
3937         __get_user((*host_array)[i], &array[i]);
3938     }
3939     unlock_user(array, target_addr, 0);
3940 
3941     return 0;
3942 }
3943 
3944 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3945                                                unsigned short **host_array)
3946 {
3947     int nsems;
3948     unsigned short *array;
3949     union semun semun;
3950     struct semid_ds semid_ds;
3951     int i, ret;
3952 
3953     semun.buf = &semid_ds;
3954 
3955     ret = semctl(semid, 0, IPC_STAT, semun);
3956     if (ret == -1)
3957         return get_errno(ret);
3958 
3959     nsems = semid_ds.sem_nsems;
3960 
3961     array = lock_user(VERIFY_WRITE, target_addr,
3962                       nsems*sizeof(unsigned short), 0);
3963     if (!array)
3964         return -TARGET_EFAULT;
3965 
3966     for(i=0; i<nsems; i++) {
3967         __put_user((*host_array)[i], &array[i]);
3968     }
3969     g_free(*host_array);
3970     unlock_user(array, target_addr, 1);
3971 
3972     return 0;
3973 }
3974 
3975 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3976                                  abi_ulong target_arg)
3977 {
3978     union target_semun target_su = { .buf = target_arg };
3979     union semun arg;
3980     struct semid_ds dsarg;
3981     unsigned short *array = NULL;
3982     struct seminfo seminfo;
3983     abi_long ret = -TARGET_EINVAL;
3984     abi_long err;
3985     cmd &= 0xff;
3986 
3987     switch( cmd ) {
3988 	case GETVAL:
3989 	case SETVAL:
3990             /* In 64 bit cross-endian situations, we will erroneously pick up
3991              * the wrong half of the union for the "val" element.  To rectify
3992              * this, the entire 8-byte structure is byteswapped, followed by
3993 	     * a swap of the 4 byte val field. In other cases, the data is
3994 	     * already in proper host byte order. */
3995 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3996 		target_su.buf = tswapal(target_su.buf);
3997 		arg.val = tswap32(target_su.val);
3998 	    } else {
3999 		arg.val = target_su.val;
4000 	    }
4001             ret = get_errno(semctl(semid, semnum, cmd, arg));
4002             break;
4003 	case GETALL:
4004 	case SETALL:
4005             err = target_to_host_semarray(semid, &array, target_su.array);
4006             if (err)
4007                 return err;
4008             arg.array = array;
4009             ret = get_errno(semctl(semid, semnum, cmd, arg));
4010             err = host_to_target_semarray(semid, target_su.array, &array);
4011             if (err)
4012                 return err;
4013             break;
4014 	case IPC_STAT:
4015 	case IPC_SET:
4016 	case SEM_STAT:
4017             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4018             if (err)
4019                 return err;
4020             arg.buf = &dsarg;
4021             ret = get_errno(semctl(semid, semnum, cmd, arg));
4022             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4023             if (err)
4024                 return err;
4025             break;
4026 	case IPC_INFO:
4027 	case SEM_INFO:
4028             arg.__buf = &seminfo;
4029             ret = get_errno(semctl(semid, semnum, cmd, arg));
4030             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4031             if (err)
4032                 return err;
4033             break;
4034 	case IPC_RMID:
4035 	case GETPID:
4036 	case GETNCNT:
4037 	case GETZCNT:
4038             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4039             break;
4040     }
4041 
4042     return ret;
4043 }
4044 
4045 struct target_sembuf {
4046     unsigned short sem_num;
4047     short sem_op;
4048     short sem_flg;
4049 };
4050 
4051 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4052                                              abi_ulong target_addr,
4053                                              unsigned nsops)
4054 {
4055     struct target_sembuf *target_sembuf;
4056     int i;
4057 
4058     target_sembuf = lock_user(VERIFY_READ, target_addr,
4059                               nsops*sizeof(struct target_sembuf), 1);
4060     if (!target_sembuf)
4061         return -TARGET_EFAULT;
4062 
4063     for(i=0; i<nsops; i++) {
4064         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4065         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4066         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4067     }
4068 
4069     unlock_user(target_sembuf, target_addr, 0);
4070 
4071     return 0;
4072 }
4073 
4074 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4075     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4076 
4077 /*
4078  * This macro is required to handle the s390 variants, which passes the
4079  * arguments in a different order than default.
4080  */
4081 #ifdef __s390x__
4082 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4083   (__nsops), (__timeout), (__sops)
4084 #else
4085 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4086   (__nsops), 0, (__sops), (__timeout)
4087 #endif
4088 
4089 static inline abi_long do_semtimedop(int semid,
4090                                      abi_long ptr,
4091                                      unsigned nsops,
4092                                      abi_long timeout, bool time64)
4093 {
4094     struct sembuf *sops;
4095     struct timespec ts, *pts = NULL;
4096     abi_long ret;
4097 
4098     if (timeout) {
4099         pts = &ts;
4100         if (time64) {
4101             if (target_to_host_timespec64(pts, timeout)) {
4102                 return -TARGET_EFAULT;
4103             }
4104         } else {
4105             if (target_to_host_timespec(pts, timeout)) {
4106                 return -TARGET_EFAULT;
4107             }
4108         }
4109     }
4110 
4111     if (nsops > TARGET_SEMOPM) {
4112         return -TARGET_E2BIG;
4113     }
4114 
4115     sops = g_new(struct sembuf, nsops);
4116 
4117     if (target_to_host_sembuf(sops, ptr, nsops)) {
4118         g_free(sops);
4119         return -TARGET_EFAULT;
4120     }
4121 
4122     ret = -TARGET_ENOSYS;
4123 #ifdef __NR_semtimedop
4124     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4125 #endif
4126 #ifdef __NR_ipc
4127     if (ret == -TARGET_ENOSYS) {
4128         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4129                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4130     }
4131 #endif
4132     g_free(sops);
4133     return ret;
4134 }
4135 #endif
4136 
4137 struct target_msqid_ds
4138 {
4139     struct target_ipc_perm msg_perm;
4140     abi_ulong msg_stime;
4141 #if TARGET_ABI_BITS == 32
4142     abi_ulong __unused1;
4143 #endif
4144     abi_ulong msg_rtime;
4145 #if TARGET_ABI_BITS == 32
4146     abi_ulong __unused2;
4147 #endif
4148     abi_ulong msg_ctime;
4149 #if TARGET_ABI_BITS == 32
4150     abi_ulong __unused3;
4151 #endif
4152     abi_ulong __msg_cbytes;
4153     abi_ulong msg_qnum;
4154     abi_ulong msg_qbytes;
4155     abi_ulong msg_lspid;
4156     abi_ulong msg_lrpid;
4157     abi_ulong __unused4;
4158     abi_ulong __unused5;
4159 };
4160 
4161 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4162                                                abi_ulong target_addr)
4163 {
4164     struct target_msqid_ds *target_md;
4165 
4166     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4167         return -TARGET_EFAULT;
4168     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4169         return -TARGET_EFAULT;
4170     host_md->msg_stime = tswapal(target_md->msg_stime);
4171     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4172     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4173     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4174     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4175     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4176     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4177     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4178     unlock_user_struct(target_md, target_addr, 0);
4179     return 0;
4180 }
4181 
4182 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4183                                                struct msqid_ds *host_md)
4184 {
4185     struct target_msqid_ds *target_md;
4186 
4187     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4188         return -TARGET_EFAULT;
4189     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4190         return -TARGET_EFAULT;
4191     target_md->msg_stime = tswapal(host_md->msg_stime);
4192     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4193     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4194     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4195     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4196     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4197     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4198     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4199     unlock_user_struct(target_md, target_addr, 1);
4200     return 0;
4201 }
4202 
4203 struct target_msginfo {
4204     int msgpool;
4205     int msgmap;
4206     int msgmax;
4207     int msgmnb;
4208     int msgmni;
4209     int msgssz;
4210     int msgtql;
4211     unsigned short int msgseg;
4212 };
4213 
4214 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4215                                               struct msginfo *host_msginfo)
4216 {
4217     struct target_msginfo *target_msginfo;
4218     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4219         return -TARGET_EFAULT;
4220     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4221     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4222     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4223     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4224     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4225     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4226     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4227     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4228     unlock_user_struct(target_msginfo, target_addr, 1);
4229     return 0;
4230 }
4231 
4232 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4233 {
4234     struct msqid_ds dsarg;
4235     struct msginfo msginfo;
4236     abi_long ret = -TARGET_EINVAL;
4237 
4238     cmd &= 0xff;
4239 
4240     switch (cmd) {
4241     case IPC_STAT:
4242     case IPC_SET:
4243     case MSG_STAT:
4244         if (target_to_host_msqid_ds(&dsarg,ptr))
4245             return -TARGET_EFAULT;
4246         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4247         if (host_to_target_msqid_ds(ptr,&dsarg))
4248             return -TARGET_EFAULT;
4249         break;
4250     case IPC_RMID:
4251         ret = get_errno(msgctl(msgid, cmd, NULL));
4252         break;
4253     case IPC_INFO:
4254     case MSG_INFO:
4255         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4256         if (host_to_target_msginfo(ptr, &msginfo))
4257             return -TARGET_EFAULT;
4258         break;
4259     }
4260 
4261     return ret;
4262 }
4263 
4264 struct target_msgbuf {
4265     abi_long mtype;
4266     char	mtext[1];
4267 };
4268 
4269 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4270                                  ssize_t msgsz, int msgflg)
4271 {
4272     struct target_msgbuf *target_mb;
4273     struct msgbuf *host_mb;
4274     abi_long ret = 0;
4275 
4276     if (msgsz < 0) {
4277         return -TARGET_EINVAL;
4278     }
4279 
4280     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4281         return -TARGET_EFAULT;
4282     host_mb = g_try_malloc(msgsz + sizeof(long));
4283     if (!host_mb) {
4284         unlock_user_struct(target_mb, msgp, 0);
4285         return -TARGET_ENOMEM;
4286     }
4287     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4288     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4289     ret = -TARGET_ENOSYS;
4290 #ifdef __NR_msgsnd
4291     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4292 #endif
4293 #ifdef __NR_ipc
4294     if (ret == -TARGET_ENOSYS) {
4295 #ifdef __s390x__
4296         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4297                                  host_mb));
4298 #else
4299         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4300                                  host_mb, 0));
4301 #endif
4302     }
4303 #endif
4304     g_free(host_mb);
4305     unlock_user_struct(target_mb, msgp, 0);
4306 
4307     return ret;
4308 }
4309 
4310 #ifdef __NR_ipc
4311 #if defined(__sparc__)
4312 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4313 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4314 #elif defined(__s390x__)
4315 /* The s390 sys_ipc variant has only five parameters.  */
4316 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4317     ((long int[]){(long int)__msgp, __msgtyp})
4318 #else
4319 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4320     ((long int[]){(long int)__msgp, __msgtyp}), 0
4321 #endif
4322 #endif
4323 
4324 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4325                                  ssize_t msgsz, abi_long msgtyp,
4326                                  int msgflg)
4327 {
4328     struct target_msgbuf *target_mb;
4329     char *target_mtext;
4330     struct msgbuf *host_mb;
4331     abi_long ret = 0;
4332 
4333     if (msgsz < 0) {
4334         return -TARGET_EINVAL;
4335     }
4336 
4337     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4338         return -TARGET_EFAULT;
4339 
4340     host_mb = g_try_malloc(msgsz + sizeof(long));
4341     if (!host_mb) {
4342         ret = -TARGET_ENOMEM;
4343         goto end;
4344     }
4345     ret = -TARGET_ENOSYS;
4346 #ifdef __NR_msgrcv
4347     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4348 #endif
4349 #ifdef __NR_ipc
4350     if (ret == -TARGET_ENOSYS) {
4351         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4352                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4353     }
4354 #endif
4355 
4356     if (ret > 0) {
4357         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4358         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4359         if (!target_mtext) {
4360             ret = -TARGET_EFAULT;
4361             goto end;
4362         }
4363         memcpy(target_mb->mtext, host_mb->mtext, ret);
4364         unlock_user(target_mtext, target_mtext_addr, ret);
4365     }
4366 
4367     target_mb->mtype = tswapal(host_mb->mtype);
4368 
4369 end:
4370     if (target_mb)
4371         unlock_user_struct(target_mb, msgp, 1);
4372     g_free(host_mb);
4373     return ret;
4374 }
4375 
4376 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4377                                                abi_ulong target_addr)
4378 {
4379     struct target_shmid_ds *target_sd;
4380 
4381     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4382         return -TARGET_EFAULT;
4383     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4384         return -TARGET_EFAULT;
4385     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4386     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4387     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4388     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4389     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4390     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4391     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4392     unlock_user_struct(target_sd, target_addr, 0);
4393     return 0;
4394 }
4395 
4396 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4397                                                struct shmid_ds *host_sd)
4398 {
4399     struct target_shmid_ds *target_sd;
4400 
4401     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4402         return -TARGET_EFAULT;
4403     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4404         return -TARGET_EFAULT;
4405     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4406     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4407     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4408     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4409     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4410     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4411     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4412     unlock_user_struct(target_sd, target_addr, 1);
4413     return 0;
4414 }
4415 
4416 struct  target_shminfo {
4417     abi_ulong shmmax;
4418     abi_ulong shmmin;
4419     abi_ulong shmmni;
4420     abi_ulong shmseg;
4421     abi_ulong shmall;
4422 };
4423 
4424 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4425                                               struct shminfo *host_shminfo)
4426 {
4427     struct target_shminfo *target_shminfo;
4428     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4429         return -TARGET_EFAULT;
4430     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4431     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4432     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4433     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4434     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4435     unlock_user_struct(target_shminfo, target_addr, 1);
4436     return 0;
4437 }
4438 
4439 struct target_shm_info {
4440     int used_ids;
4441     abi_ulong shm_tot;
4442     abi_ulong shm_rss;
4443     abi_ulong shm_swp;
4444     abi_ulong swap_attempts;
4445     abi_ulong swap_successes;
4446 };
4447 
4448 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4449                                                struct shm_info *host_shm_info)
4450 {
4451     struct target_shm_info *target_shm_info;
4452     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4453         return -TARGET_EFAULT;
4454     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4455     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4456     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4457     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4458     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4459     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4460     unlock_user_struct(target_shm_info, target_addr, 1);
4461     return 0;
4462 }
4463 
4464 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4465 {
4466     struct shmid_ds dsarg;
4467     struct shminfo shminfo;
4468     struct shm_info shm_info;
4469     abi_long ret = -TARGET_EINVAL;
4470 
4471     cmd &= 0xff;
4472 
4473     switch(cmd) {
4474     case IPC_STAT:
4475     case IPC_SET:
4476     case SHM_STAT:
4477         if (target_to_host_shmid_ds(&dsarg, buf))
4478             return -TARGET_EFAULT;
4479         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4480         if (host_to_target_shmid_ds(buf, &dsarg))
4481             return -TARGET_EFAULT;
4482         break;
4483     case IPC_INFO:
4484         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4485         if (host_to_target_shminfo(buf, &shminfo))
4486             return -TARGET_EFAULT;
4487         break;
4488     case SHM_INFO:
4489         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4490         if (host_to_target_shm_info(buf, &shm_info))
4491             return -TARGET_EFAULT;
4492         break;
4493     case IPC_RMID:
4494     case SHM_LOCK:
4495     case SHM_UNLOCK:
4496         ret = get_errno(shmctl(shmid, cmd, NULL));
4497         break;
4498     }
4499 
4500     return ret;
4501 }
4502 
4503 #ifndef TARGET_FORCE_SHMLBA
4504 /* For most architectures, SHMLBA is the same as the page size;
4505  * some architectures have larger values, in which case they should
4506  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4507  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4508  * and defining its own value for SHMLBA.
4509  *
4510  * The kernel also permits SHMLBA to be set by the architecture to a
4511  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4512  * this means that addresses are rounded to the large size if
4513  * SHM_RND is set but addresses not aligned to that size are not rejected
4514  * as long as they are at least page-aligned. Since the only architecture
4515  * which uses this is ia64 this code doesn't provide for that oddity.
4516  */
4517 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4518 {
4519     return TARGET_PAGE_SIZE;
4520 }
4521 #endif
4522 
4523 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4524                                  int shmid, abi_ulong shmaddr, int shmflg)
4525 {
4526     CPUState *cpu = env_cpu(cpu_env);
4527     abi_long raddr;
4528     void *host_raddr;
4529     struct shmid_ds shm_info;
4530     int i,ret;
4531     abi_ulong shmlba;
4532 
4533     /* shmat pointers are always untagged */
4534 
4535     /* find out the length of the shared memory segment */
4536     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4537     if (is_error(ret)) {
4538         /* can't get length, bail out */
4539         return ret;
4540     }
4541 
4542     shmlba = target_shmlba(cpu_env);
4543 
4544     if (shmaddr & (shmlba - 1)) {
4545         if (shmflg & SHM_RND) {
4546             shmaddr &= ~(shmlba - 1);
4547         } else {
4548             return -TARGET_EINVAL;
4549         }
4550     }
4551     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4552         return -TARGET_EINVAL;
4553     }
4554 
4555     mmap_lock();
4556 
4557     /*
4558      * We're mapping shared memory, so ensure we generate code for parallel
4559      * execution and flush old translations.  This will work up to the level
4560      * supported by the host -- anything that requires EXCP_ATOMIC will not
4561      * be atomic with respect to an external process.
4562      */
4563     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4564         cpu->tcg_cflags |= CF_PARALLEL;
4565         tb_flush(cpu);
4566     }
4567 
4568     if (shmaddr)
4569         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4570     else {
4571         abi_ulong mmap_start;
4572 
4573         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4574         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4575 
4576         if (mmap_start == -1) {
4577             errno = ENOMEM;
4578             host_raddr = (void *)-1;
4579         } else
4580             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4581                                shmflg | SHM_REMAP);
4582     }
4583 
4584     if (host_raddr == (void *)-1) {
4585         mmap_unlock();
4586         return get_errno((long)host_raddr);
4587     }
4588     raddr=h2g((unsigned long)host_raddr);
4589 
4590     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4591                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4592                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4593 
4594     for (i = 0; i < N_SHM_REGIONS; i++) {
4595         if (!shm_regions[i].in_use) {
4596             shm_regions[i].in_use = true;
4597             shm_regions[i].start = raddr;
4598             shm_regions[i].size = shm_info.shm_segsz;
4599             break;
4600         }
4601     }
4602 
4603     mmap_unlock();
4604     return raddr;
4605 
4606 }
4607 
4608 static inline abi_long do_shmdt(abi_ulong shmaddr)
4609 {
4610     int i;
4611     abi_long rv;
4612 
4613     /* shmdt pointers are always untagged */
4614 
4615     mmap_lock();
4616 
4617     for (i = 0; i < N_SHM_REGIONS; ++i) {
4618         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4619             shm_regions[i].in_use = false;
4620             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4621             break;
4622         }
4623     }
4624     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4625 
4626     mmap_unlock();
4627 
4628     return rv;
4629 }
4630 
4631 #ifdef TARGET_NR_ipc
4632 /* ??? This only works with linear mappings.  */
4633 /* do_ipc() must return target values and target errnos. */
4634 static abi_long do_ipc(CPUArchState *cpu_env,
4635                        unsigned int call, abi_long first,
4636                        abi_long second, abi_long third,
4637                        abi_long ptr, abi_long fifth)
4638 {
4639     int version;
4640     abi_long ret = 0;
4641 
4642     version = call >> 16;
4643     call &= 0xffff;
4644 
4645     switch (call) {
4646     case IPCOP_semop:
4647         ret = do_semtimedop(first, ptr, second, 0, false);
4648         break;
4649     case IPCOP_semtimedop:
4650     /*
4651      * The s390 sys_ipc variant has only five parameters instead of six
4652      * (as for default variant) and the only difference is the handling of
4653      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4654      * to a struct timespec where the generic variant uses fifth parameter.
4655      */
4656 #if defined(TARGET_S390X)
4657         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4658 #else
4659         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4660 #endif
4661         break;
4662 
4663     case IPCOP_semget:
4664         ret = get_errno(semget(first, second, third));
4665         break;
4666 
4667     case IPCOP_semctl: {
4668         /* The semun argument to semctl is passed by value, so dereference the
4669          * ptr argument. */
4670         abi_ulong atptr;
4671         get_user_ual(atptr, ptr);
4672         ret = do_semctl(first, second, third, atptr);
4673         break;
4674     }
4675 
4676     case IPCOP_msgget:
4677         ret = get_errno(msgget(first, second));
4678         break;
4679 
4680     case IPCOP_msgsnd:
4681         ret = do_msgsnd(first, ptr, second, third);
4682         break;
4683 
4684     case IPCOP_msgctl:
4685         ret = do_msgctl(first, second, ptr);
4686         break;
4687 
4688     case IPCOP_msgrcv:
4689         switch (version) {
4690         case 0:
4691             {
4692                 struct target_ipc_kludge {
4693                     abi_long msgp;
4694                     abi_long msgtyp;
4695                 } *tmp;
4696 
4697                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4698                     ret = -TARGET_EFAULT;
4699                     break;
4700                 }
4701 
4702                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4703 
4704                 unlock_user_struct(tmp, ptr, 0);
4705                 break;
4706             }
4707         default:
4708             ret = do_msgrcv(first, ptr, second, fifth, third);
4709         }
4710         break;
4711 
4712     case IPCOP_shmat:
4713         switch (version) {
4714         default:
4715         {
4716             abi_ulong raddr;
4717             raddr = do_shmat(cpu_env, first, ptr, second);
4718             if (is_error(raddr))
4719                 return get_errno(raddr);
4720             if (put_user_ual(raddr, third))
4721                 return -TARGET_EFAULT;
4722             break;
4723         }
4724         case 1:
4725             ret = -TARGET_EINVAL;
4726             break;
4727         }
4728 	break;
4729     case IPCOP_shmdt:
4730         ret = do_shmdt(ptr);
4731 	break;
4732 
4733     case IPCOP_shmget:
4734 	/* IPC_* flag values are the same on all linux platforms */
4735 	ret = get_errno(shmget(first, second, third));
4736 	break;
4737 
4738 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4739     case IPCOP_shmctl:
4740         ret = do_shmctl(first, second, ptr);
4741         break;
4742     default:
4743         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4744                       call, version);
4745 	ret = -TARGET_ENOSYS;
4746 	break;
4747     }
4748     return ret;
4749 }
4750 #endif
4751 
4752 /* kernel structure types definitions */
4753 
4754 #define STRUCT(name, ...) STRUCT_ ## name,
4755 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4756 enum {
4757 #include "syscall_types.h"
4758 STRUCT_MAX
4759 };
4760 #undef STRUCT
4761 #undef STRUCT_SPECIAL
4762 
4763 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4764 #define STRUCT_SPECIAL(name)
4765 #include "syscall_types.h"
4766 #undef STRUCT
4767 #undef STRUCT_SPECIAL
4768 
4769 #define MAX_STRUCT_SIZE 4096
4770 
4771 #ifdef CONFIG_FIEMAP
4772 /* So fiemap access checks don't overflow on 32 bit systems.
4773  * This is very slightly smaller than the limit imposed by
4774  * the underlying kernel.
4775  */
4776 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4777                             / sizeof(struct fiemap_extent))
4778 
4779 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4780                                        int fd, int cmd, abi_long arg)
4781 {
4782     /* The parameter for this ioctl is a struct fiemap followed
4783      * by an array of struct fiemap_extent whose size is set
4784      * in fiemap->fm_extent_count. The array is filled in by the
4785      * ioctl.
4786      */
4787     int target_size_in, target_size_out;
4788     struct fiemap *fm;
4789     const argtype *arg_type = ie->arg_type;
4790     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4791     void *argptr, *p;
4792     abi_long ret;
4793     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4794     uint32_t outbufsz;
4795     int free_fm = 0;
4796 
4797     assert(arg_type[0] == TYPE_PTR);
4798     assert(ie->access == IOC_RW);
4799     arg_type++;
4800     target_size_in = thunk_type_size(arg_type, 0);
4801     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4802     if (!argptr) {
4803         return -TARGET_EFAULT;
4804     }
4805     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4806     unlock_user(argptr, arg, 0);
4807     fm = (struct fiemap *)buf_temp;
4808     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4809         return -TARGET_EINVAL;
4810     }
4811 
4812     outbufsz = sizeof (*fm) +
4813         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4814 
4815     if (outbufsz > MAX_STRUCT_SIZE) {
4816         /* We can't fit all the extents into the fixed size buffer.
4817          * Allocate one that is large enough and use it instead.
4818          */
4819         fm = g_try_malloc(outbufsz);
4820         if (!fm) {
4821             return -TARGET_ENOMEM;
4822         }
4823         memcpy(fm, buf_temp, sizeof(struct fiemap));
4824         free_fm = 1;
4825     }
4826     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4827     if (!is_error(ret)) {
4828         target_size_out = target_size_in;
4829         /* An extent_count of 0 means we were only counting the extents
4830          * so there are no structs to copy
4831          */
4832         if (fm->fm_extent_count != 0) {
4833             target_size_out += fm->fm_mapped_extents * extent_size;
4834         }
4835         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4836         if (!argptr) {
4837             ret = -TARGET_EFAULT;
4838         } else {
4839             /* Convert the struct fiemap */
4840             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4841             if (fm->fm_extent_count != 0) {
4842                 p = argptr + target_size_in;
4843                 /* ...and then all the struct fiemap_extents */
4844                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4845                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4846                                   THUNK_TARGET);
4847                     p += extent_size;
4848                 }
4849             }
4850             unlock_user(argptr, arg, target_size_out);
4851         }
4852     }
4853     if (free_fm) {
4854         g_free(fm);
4855     }
4856     return ret;
4857 }
4858 #endif
4859 
4860 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4861                                 int fd, int cmd, abi_long arg)
4862 {
4863     const argtype *arg_type = ie->arg_type;
4864     int target_size;
4865     void *argptr;
4866     int ret;
4867     struct ifconf *host_ifconf;
4868     uint32_t outbufsz;
4869     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4870     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4871     int target_ifreq_size;
4872     int nb_ifreq;
4873     int free_buf = 0;
4874     int i;
4875     int target_ifc_len;
4876     abi_long target_ifc_buf;
4877     int host_ifc_len;
4878     char *host_ifc_buf;
4879 
4880     assert(arg_type[0] == TYPE_PTR);
4881     assert(ie->access == IOC_RW);
4882 
4883     arg_type++;
4884     target_size = thunk_type_size(arg_type, 0);
4885 
4886     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4887     if (!argptr)
4888         return -TARGET_EFAULT;
4889     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4890     unlock_user(argptr, arg, 0);
4891 
4892     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4893     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4894     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4895 
4896     if (target_ifc_buf != 0) {
4897         target_ifc_len = host_ifconf->ifc_len;
4898         nb_ifreq = target_ifc_len / target_ifreq_size;
4899         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4900 
4901         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4902         if (outbufsz > MAX_STRUCT_SIZE) {
4903             /*
4904              * We can't fit all the extents into the fixed size buffer.
4905              * Allocate one that is large enough and use it instead.
4906              */
4907             host_ifconf = malloc(outbufsz);
4908             if (!host_ifconf) {
4909                 return -TARGET_ENOMEM;
4910             }
4911             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4912             free_buf = 1;
4913         }
4914         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4915 
4916         host_ifconf->ifc_len = host_ifc_len;
4917     } else {
4918       host_ifc_buf = NULL;
4919     }
4920     host_ifconf->ifc_buf = host_ifc_buf;
4921 
4922     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4923     if (!is_error(ret)) {
4924 	/* convert host ifc_len to target ifc_len */
4925 
4926         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4927         target_ifc_len = nb_ifreq * target_ifreq_size;
4928         host_ifconf->ifc_len = target_ifc_len;
4929 
4930 	/* restore target ifc_buf */
4931 
4932         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4933 
4934 	/* copy struct ifconf to target user */
4935 
4936         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4937         if (!argptr)
4938             return -TARGET_EFAULT;
4939         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4940         unlock_user(argptr, arg, target_size);
4941 
4942         if (target_ifc_buf != 0) {
4943             /* copy ifreq[] to target user */
4944             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4945             for (i = 0; i < nb_ifreq ; i++) {
4946                 thunk_convert(argptr + i * target_ifreq_size,
4947                               host_ifc_buf + i * sizeof(struct ifreq),
4948                               ifreq_arg_type, THUNK_TARGET);
4949             }
4950             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4951         }
4952     }
4953 
4954     if (free_buf) {
4955         free(host_ifconf);
4956     }
4957 
4958     return ret;
4959 }
4960 
4961 #if defined(CONFIG_USBFS)
4962 #if HOST_LONG_BITS > 64
4963 #error USBDEVFS thunks do not support >64 bit hosts yet.
4964 #endif
4965 struct live_urb {
4966     uint64_t target_urb_adr;
4967     uint64_t target_buf_adr;
4968     char *target_buf_ptr;
4969     struct usbdevfs_urb host_urb;
4970 };
4971 
4972 static GHashTable *usbdevfs_urb_hashtable(void)
4973 {
4974     static GHashTable *urb_hashtable;
4975 
4976     if (!urb_hashtable) {
4977         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4978     }
4979     return urb_hashtable;
4980 }
4981 
4982 static void urb_hashtable_insert(struct live_urb *urb)
4983 {
4984     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4985     g_hash_table_insert(urb_hashtable, urb, urb);
4986 }
4987 
4988 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4989 {
4990     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4991     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4992 }
4993 
4994 static void urb_hashtable_remove(struct live_urb *urb)
4995 {
4996     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4997     g_hash_table_remove(urb_hashtable, urb);
4998 }
4999 
5000 static abi_long
5001 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5002                           int fd, int cmd, abi_long arg)
5003 {
5004     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5005     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5006     struct live_urb *lurb;
5007     void *argptr;
5008     uint64_t hurb;
5009     int target_size;
5010     uintptr_t target_urb_adr;
5011     abi_long ret;
5012 
5013     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5014 
5015     memset(buf_temp, 0, sizeof(uint64_t));
5016     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5017     if (is_error(ret)) {
5018         return ret;
5019     }
5020 
5021     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5022     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5023     if (!lurb->target_urb_adr) {
5024         return -TARGET_EFAULT;
5025     }
5026     urb_hashtable_remove(lurb);
5027     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5028         lurb->host_urb.buffer_length);
5029     lurb->target_buf_ptr = NULL;
5030 
5031     /* restore the guest buffer pointer */
5032     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5033 
5034     /* update the guest urb struct */
5035     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5036     if (!argptr) {
5037         g_free(lurb);
5038         return -TARGET_EFAULT;
5039     }
5040     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5041     unlock_user(argptr, lurb->target_urb_adr, target_size);
5042 
5043     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5044     /* write back the urb handle */
5045     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5046     if (!argptr) {
5047         g_free(lurb);
5048         return -TARGET_EFAULT;
5049     }
5050 
5051     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5052     target_urb_adr = lurb->target_urb_adr;
5053     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5054     unlock_user(argptr, arg, target_size);
5055 
5056     g_free(lurb);
5057     return ret;
5058 }
5059 
5060 static abi_long
5061 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5062                              uint8_t *buf_temp __attribute__((unused)),
5063                              int fd, int cmd, abi_long arg)
5064 {
5065     struct live_urb *lurb;
5066 
5067     /* map target address back to host URB with metadata. */
5068     lurb = urb_hashtable_lookup(arg);
5069     if (!lurb) {
5070         return -TARGET_EFAULT;
5071     }
5072     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5073 }
5074 
5075 static abi_long
5076 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5077                             int fd, int cmd, abi_long arg)
5078 {
5079     const argtype *arg_type = ie->arg_type;
5080     int target_size;
5081     abi_long ret;
5082     void *argptr;
5083     int rw_dir;
5084     struct live_urb *lurb;
5085 
5086     /*
5087      * each submitted URB needs to map to a unique ID for the
5088      * kernel, and that unique ID needs to be a pointer to
5089      * host memory.  hence, we need to malloc for each URB.
5090      * isochronous transfers have a variable length struct.
5091      */
5092     arg_type++;
5093     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5094 
5095     /* construct host copy of urb and metadata */
5096     lurb = g_try_malloc0(sizeof(struct live_urb));
5097     if (!lurb) {
5098         return -TARGET_ENOMEM;
5099     }
5100 
5101     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5102     if (!argptr) {
5103         g_free(lurb);
5104         return -TARGET_EFAULT;
5105     }
5106     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5107     unlock_user(argptr, arg, 0);
5108 
5109     lurb->target_urb_adr = arg;
5110     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5111 
5112     /* buffer space used depends on endpoint type so lock the entire buffer */
5113     /* control type urbs should check the buffer contents for true direction */
5114     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5115     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5116         lurb->host_urb.buffer_length, 1);
5117     if (lurb->target_buf_ptr == NULL) {
5118         g_free(lurb);
5119         return -TARGET_EFAULT;
5120     }
5121 
5122     /* update buffer pointer in host copy */
5123     lurb->host_urb.buffer = lurb->target_buf_ptr;
5124 
5125     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5126     if (is_error(ret)) {
5127         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5128         g_free(lurb);
5129     } else {
5130         urb_hashtable_insert(lurb);
5131     }
5132 
5133     return ret;
5134 }
5135 #endif /* CONFIG_USBFS */
5136 
5137 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5138                             int cmd, abi_long arg)
5139 {
5140     void *argptr;
5141     struct dm_ioctl *host_dm;
5142     abi_long guest_data;
5143     uint32_t guest_data_size;
5144     int target_size;
5145     const argtype *arg_type = ie->arg_type;
5146     abi_long ret;
5147     void *big_buf = NULL;
5148     char *host_data;
5149 
5150     arg_type++;
5151     target_size = thunk_type_size(arg_type, 0);
5152     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5153     if (!argptr) {
5154         ret = -TARGET_EFAULT;
5155         goto out;
5156     }
5157     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5158     unlock_user(argptr, arg, 0);
5159 
5160     /* buf_temp is too small, so fetch things into a bigger buffer */
5161     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5162     memcpy(big_buf, buf_temp, target_size);
5163     buf_temp = big_buf;
5164     host_dm = big_buf;
5165 
5166     guest_data = arg + host_dm->data_start;
5167     if ((guest_data - arg) < 0) {
5168         ret = -TARGET_EINVAL;
5169         goto out;
5170     }
5171     guest_data_size = host_dm->data_size - host_dm->data_start;
5172     host_data = (char*)host_dm + host_dm->data_start;
5173 
5174     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5175     if (!argptr) {
5176         ret = -TARGET_EFAULT;
5177         goto out;
5178     }
5179 
5180     switch (ie->host_cmd) {
5181     case DM_REMOVE_ALL:
5182     case DM_LIST_DEVICES:
5183     case DM_DEV_CREATE:
5184     case DM_DEV_REMOVE:
5185     case DM_DEV_SUSPEND:
5186     case DM_DEV_STATUS:
5187     case DM_DEV_WAIT:
5188     case DM_TABLE_STATUS:
5189     case DM_TABLE_CLEAR:
5190     case DM_TABLE_DEPS:
5191     case DM_LIST_VERSIONS:
5192         /* no input data */
5193         break;
5194     case DM_DEV_RENAME:
5195     case DM_DEV_SET_GEOMETRY:
5196         /* data contains only strings */
5197         memcpy(host_data, argptr, guest_data_size);
5198         break;
5199     case DM_TARGET_MSG:
5200         memcpy(host_data, argptr, guest_data_size);
5201         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5202         break;
5203     case DM_TABLE_LOAD:
5204     {
5205         void *gspec = argptr;
5206         void *cur_data = host_data;
5207         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5208         int spec_size = thunk_type_size(arg_type, 0);
5209         int i;
5210 
5211         for (i = 0; i < host_dm->target_count; i++) {
5212             struct dm_target_spec *spec = cur_data;
5213             uint32_t next;
5214             int slen;
5215 
5216             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5217             slen = strlen((char*)gspec + spec_size) + 1;
5218             next = spec->next;
5219             spec->next = sizeof(*spec) + slen;
5220             strcpy((char*)&spec[1], gspec + spec_size);
5221             gspec += next;
5222             cur_data += spec->next;
5223         }
5224         break;
5225     }
5226     default:
5227         ret = -TARGET_EINVAL;
5228         unlock_user(argptr, guest_data, 0);
5229         goto out;
5230     }
5231     unlock_user(argptr, guest_data, 0);
5232 
5233     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5234     if (!is_error(ret)) {
5235         guest_data = arg + host_dm->data_start;
5236         guest_data_size = host_dm->data_size - host_dm->data_start;
5237         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5238         switch (ie->host_cmd) {
5239         case DM_REMOVE_ALL:
5240         case DM_DEV_CREATE:
5241         case DM_DEV_REMOVE:
5242         case DM_DEV_RENAME:
5243         case DM_DEV_SUSPEND:
5244         case DM_DEV_STATUS:
5245         case DM_TABLE_LOAD:
5246         case DM_TABLE_CLEAR:
5247         case DM_TARGET_MSG:
5248         case DM_DEV_SET_GEOMETRY:
5249             /* no return data */
5250             break;
5251         case DM_LIST_DEVICES:
5252         {
5253             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5254             uint32_t remaining_data = guest_data_size;
5255             void *cur_data = argptr;
5256             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5257             int nl_size = 12; /* can't use thunk_size due to alignment */
5258 
5259             while (1) {
5260                 uint32_t next = nl->next;
5261                 if (next) {
5262                     nl->next = nl_size + (strlen(nl->name) + 1);
5263                 }
5264                 if (remaining_data < nl->next) {
5265                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5266                     break;
5267                 }
5268                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5269                 strcpy(cur_data + nl_size, nl->name);
5270                 cur_data += nl->next;
5271                 remaining_data -= nl->next;
5272                 if (!next) {
5273                     break;
5274                 }
5275                 nl = (void*)nl + next;
5276             }
5277             break;
5278         }
5279         case DM_DEV_WAIT:
5280         case DM_TABLE_STATUS:
5281         {
5282             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5283             void *cur_data = argptr;
5284             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5285             int spec_size = thunk_type_size(arg_type, 0);
5286             int i;
5287 
5288             for (i = 0; i < host_dm->target_count; i++) {
5289                 uint32_t next = spec->next;
5290                 int slen = strlen((char*)&spec[1]) + 1;
5291                 spec->next = (cur_data - argptr) + spec_size + slen;
5292                 if (guest_data_size < spec->next) {
5293                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5294                     break;
5295                 }
5296                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5297                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5298                 cur_data = argptr + spec->next;
5299                 spec = (void*)host_dm + host_dm->data_start + next;
5300             }
5301             break;
5302         }
5303         case DM_TABLE_DEPS:
5304         {
5305             void *hdata = (void*)host_dm + host_dm->data_start;
5306             int count = *(uint32_t*)hdata;
5307             uint64_t *hdev = hdata + 8;
5308             uint64_t *gdev = argptr + 8;
5309             int i;
5310 
5311             *(uint32_t*)argptr = tswap32(count);
5312             for (i = 0; i < count; i++) {
5313                 *gdev = tswap64(*hdev);
5314                 gdev++;
5315                 hdev++;
5316             }
5317             break;
5318         }
5319         case DM_LIST_VERSIONS:
5320         {
5321             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5322             uint32_t remaining_data = guest_data_size;
5323             void *cur_data = argptr;
5324             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5325             int vers_size = thunk_type_size(arg_type, 0);
5326 
5327             while (1) {
5328                 uint32_t next = vers->next;
5329                 if (next) {
5330                     vers->next = vers_size + (strlen(vers->name) + 1);
5331                 }
5332                 if (remaining_data < vers->next) {
5333                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5334                     break;
5335                 }
5336                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5337                 strcpy(cur_data + vers_size, vers->name);
5338                 cur_data += vers->next;
5339                 remaining_data -= vers->next;
5340                 if (!next) {
5341                     break;
5342                 }
5343                 vers = (void*)vers + next;
5344             }
5345             break;
5346         }
5347         default:
5348             unlock_user(argptr, guest_data, 0);
5349             ret = -TARGET_EINVAL;
5350             goto out;
5351         }
5352         unlock_user(argptr, guest_data, guest_data_size);
5353 
5354         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5355         if (!argptr) {
5356             ret = -TARGET_EFAULT;
5357             goto out;
5358         }
5359         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5360         unlock_user(argptr, arg, target_size);
5361     }
5362 out:
5363     g_free(big_buf);
5364     return ret;
5365 }
5366 
5367 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5368                                int cmd, abi_long arg)
5369 {
5370     void *argptr;
5371     int target_size;
5372     const argtype *arg_type = ie->arg_type;
5373     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5374     abi_long ret;
5375 
5376     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5377     struct blkpg_partition host_part;
5378 
5379     /* Read and convert blkpg */
5380     arg_type++;
5381     target_size = thunk_type_size(arg_type, 0);
5382     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5383     if (!argptr) {
5384         ret = -TARGET_EFAULT;
5385         goto out;
5386     }
5387     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5388     unlock_user(argptr, arg, 0);
5389 
5390     switch (host_blkpg->op) {
5391     case BLKPG_ADD_PARTITION:
5392     case BLKPG_DEL_PARTITION:
5393         /* payload is struct blkpg_partition */
5394         break;
5395     default:
5396         /* Unknown opcode */
5397         ret = -TARGET_EINVAL;
5398         goto out;
5399     }
5400 
5401     /* Read and convert blkpg->data */
5402     arg = (abi_long)(uintptr_t)host_blkpg->data;
5403     target_size = thunk_type_size(part_arg_type, 0);
5404     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5405     if (!argptr) {
5406         ret = -TARGET_EFAULT;
5407         goto out;
5408     }
5409     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5410     unlock_user(argptr, arg, 0);
5411 
5412     /* Swizzle the data pointer to our local copy and call! */
5413     host_blkpg->data = &host_part;
5414     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5415 
5416 out:
5417     return ret;
5418 }
5419 
5420 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5421                                 int fd, int cmd, abi_long arg)
5422 {
5423     const argtype *arg_type = ie->arg_type;
5424     const StructEntry *se;
5425     const argtype *field_types;
5426     const int *dst_offsets, *src_offsets;
5427     int target_size;
5428     void *argptr;
5429     abi_ulong *target_rt_dev_ptr = NULL;
5430     unsigned long *host_rt_dev_ptr = NULL;
5431     abi_long ret;
5432     int i;
5433 
5434     assert(ie->access == IOC_W);
5435     assert(*arg_type == TYPE_PTR);
5436     arg_type++;
5437     assert(*arg_type == TYPE_STRUCT);
5438     target_size = thunk_type_size(arg_type, 0);
5439     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5440     if (!argptr) {
5441         return -TARGET_EFAULT;
5442     }
5443     arg_type++;
5444     assert(*arg_type == (int)STRUCT_rtentry);
5445     se = struct_entries + *arg_type++;
5446     assert(se->convert[0] == NULL);
5447     /* convert struct here to be able to catch rt_dev string */
5448     field_types = se->field_types;
5449     dst_offsets = se->field_offsets[THUNK_HOST];
5450     src_offsets = se->field_offsets[THUNK_TARGET];
5451     for (i = 0; i < se->nb_fields; i++) {
5452         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5453             assert(*field_types == TYPE_PTRVOID);
5454             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5455             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5456             if (*target_rt_dev_ptr != 0) {
5457                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5458                                                   tswapal(*target_rt_dev_ptr));
5459                 if (!*host_rt_dev_ptr) {
5460                     unlock_user(argptr, arg, 0);
5461                     return -TARGET_EFAULT;
5462                 }
5463             } else {
5464                 *host_rt_dev_ptr = 0;
5465             }
5466             field_types++;
5467             continue;
5468         }
5469         field_types = thunk_convert(buf_temp + dst_offsets[i],
5470                                     argptr + src_offsets[i],
5471                                     field_types, THUNK_HOST);
5472     }
5473     unlock_user(argptr, arg, 0);
5474 
5475     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5476 
5477     assert(host_rt_dev_ptr != NULL);
5478     assert(target_rt_dev_ptr != NULL);
5479     if (*host_rt_dev_ptr != 0) {
5480         unlock_user((void *)*host_rt_dev_ptr,
5481                     *target_rt_dev_ptr, 0);
5482     }
5483     return ret;
5484 }
5485 
5486 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5487                                      int fd, int cmd, abi_long arg)
5488 {
5489     int sig = target_to_host_signal(arg);
5490     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5491 }
5492 
5493 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5494                                     int fd, int cmd, abi_long arg)
5495 {
5496     struct timeval tv;
5497     abi_long ret;
5498 
5499     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5500     if (is_error(ret)) {
5501         return ret;
5502     }
5503 
5504     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5505         if (copy_to_user_timeval(arg, &tv)) {
5506             return -TARGET_EFAULT;
5507         }
5508     } else {
5509         if (copy_to_user_timeval64(arg, &tv)) {
5510             return -TARGET_EFAULT;
5511         }
5512     }
5513 
5514     return ret;
5515 }
5516 
5517 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5518                                       int fd, int cmd, abi_long arg)
5519 {
5520     struct timespec ts;
5521     abi_long ret;
5522 
5523     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5524     if (is_error(ret)) {
5525         return ret;
5526     }
5527 
5528     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5529         if (host_to_target_timespec(arg, &ts)) {
5530             return -TARGET_EFAULT;
5531         }
5532     } else{
5533         if (host_to_target_timespec64(arg, &ts)) {
5534             return -TARGET_EFAULT;
5535         }
5536     }
5537 
5538     return ret;
5539 }
5540 
5541 #ifdef TIOCGPTPEER
5542 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5543                                      int fd, int cmd, abi_long arg)
5544 {
5545     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5546     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5547 }
5548 #endif
5549 
5550 #ifdef HAVE_DRM_H
5551 
5552 static void unlock_drm_version(struct drm_version *host_ver,
5553                                struct target_drm_version *target_ver,
5554                                bool copy)
5555 {
5556     unlock_user(host_ver->name, target_ver->name,
5557                                 copy ? host_ver->name_len : 0);
5558     unlock_user(host_ver->date, target_ver->date,
5559                                 copy ? host_ver->date_len : 0);
5560     unlock_user(host_ver->desc, target_ver->desc,
5561                                 copy ? host_ver->desc_len : 0);
5562 }
5563 
5564 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5565                                           struct target_drm_version *target_ver)
5566 {
5567     memset(host_ver, 0, sizeof(*host_ver));
5568 
5569     __get_user(host_ver->name_len, &target_ver->name_len);
5570     if (host_ver->name_len) {
5571         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5572                                    target_ver->name_len, 0);
5573         if (!host_ver->name) {
5574             return -EFAULT;
5575         }
5576     }
5577 
5578     __get_user(host_ver->date_len, &target_ver->date_len);
5579     if (host_ver->date_len) {
5580         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5581                                    target_ver->date_len, 0);
5582         if (!host_ver->date) {
5583             goto err;
5584         }
5585     }
5586 
5587     __get_user(host_ver->desc_len, &target_ver->desc_len);
5588     if (host_ver->desc_len) {
5589         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5590                                    target_ver->desc_len, 0);
5591         if (!host_ver->desc) {
5592             goto err;
5593         }
5594     }
5595 
5596     return 0;
5597 err:
5598     unlock_drm_version(host_ver, target_ver, false);
5599     return -EFAULT;
5600 }
5601 
5602 static inline void host_to_target_drmversion(
5603                                           struct target_drm_version *target_ver,
5604                                           struct drm_version *host_ver)
5605 {
5606     __put_user(host_ver->version_major, &target_ver->version_major);
5607     __put_user(host_ver->version_minor, &target_ver->version_minor);
5608     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5609     __put_user(host_ver->name_len, &target_ver->name_len);
5610     __put_user(host_ver->date_len, &target_ver->date_len);
5611     __put_user(host_ver->desc_len, &target_ver->desc_len);
5612     unlock_drm_version(host_ver, target_ver, true);
5613 }
5614 
5615 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5616                              int fd, int cmd, abi_long arg)
5617 {
5618     struct drm_version *ver;
5619     struct target_drm_version *target_ver;
5620     abi_long ret;
5621 
5622     switch (ie->host_cmd) {
5623     case DRM_IOCTL_VERSION:
5624         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5625             return -TARGET_EFAULT;
5626         }
5627         ver = (struct drm_version *)buf_temp;
5628         ret = target_to_host_drmversion(ver, target_ver);
5629         if (!is_error(ret)) {
5630             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5631             if (is_error(ret)) {
5632                 unlock_drm_version(ver, target_ver, false);
5633             } else {
5634                 host_to_target_drmversion(target_ver, ver);
5635             }
5636         }
5637         unlock_user_struct(target_ver, arg, 0);
5638         return ret;
5639     }
5640     return -TARGET_ENOSYS;
5641 }
5642 
5643 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5644                                            struct drm_i915_getparam *gparam,
5645                                            int fd, abi_long arg)
5646 {
5647     abi_long ret;
5648     int value;
5649     struct target_drm_i915_getparam *target_gparam;
5650 
5651     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5652         return -TARGET_EFAULT;
5653     }
5654 
5655     __get_user(gparam->param, &target_gparam->param);
5656     gparam->value = &value;
5657     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5658     put_user_s32(value, target_gparam->value);
5659 
5660     unlock_user_struct(target_gparam, arg, 0);
5661     return ret;
5662 }
5663 
5664 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5665                                   int fd, int cmd, abi_long arg)
5666 {
5667     switch (ie->host_cmd) {
5668     case DRM_IOCTL_I915_GETPARAM:
5669         return do_ioctl_drm_i915_getparam(ie,
5670                                           (struct drm_i915_getparam *)buf_temp,
5671                                           fd, arg);
5672     default:
5673         return -TARGET_ENOSYS;
5674     }
5675 }
5676 
5677 #endif
5678 
5679 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5680                                         int fd, int cmd, abi_long arg)
5681 {
5682     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5683     struct tun_filter *target_filter;
5684     char *target_addr;
5685 
5686     assert(ie->access == IOC_W);
5687 
5688     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5689     if (!target_filter) {
5690         return -TARGET_EFAULT;
5691     }
5692     filter->flags = tswap16(target_filter->flags);
5693     filter->count = tswap16(target_filter->count);
5694     unlock_user(target_filter, arg, 0);
5695 
5696     if (filter->count) {
5697         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5698             MAX_STRUCT_SIZE) {
5699             return -TARGET_EFAULT;
5700         }
5701 
5702         target_addr = lock_user(VERIFY_READ,
5703                                 arg + offsetof(struct tun_filter, addr),
5704                                 filter->count * ETH_ALEN, 1);
5705         if (!target_addr) {
5706             return -TARGET_EFAULT;
5707         }
5708         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5709         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5710     }
5711 
5712     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5713 }
5714 
5715 IOCTLEntry ioctl_entries[] = {
5716 #define IOCTL(cmd, access, ...) \
5717     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5718 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5719     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5720 #define IOCTL_IGNORE(cmd) \
5721     { TARGET_ ## cmd, 0, #cmd },
5722 #include "ioctls.h"
5723     { 0, 0, },
5724 };
5725 
5726 /* ??? Implement proper locking for ioctls.  */
5727 /* do_ioctl() Must return target values and target errnos. */
5728 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5729 {
5730     const IOCTLEntry *ie;
5731     const argtype *arg_type;
5732     abi_long ret;
5733     uint8_t buf_temp[MAX_STRUCT_SIZE];
5734     int target_size;
5735     void *argptr;
5736 
5737     ie = ioctl_entries;
5738     for(;;) {
5739         if (ie->target_cmd == 0) {
5740             qemu_log_mask(
5741                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5742             return -TARGET_ENOSYS;
5743         }
5744         if (ie->target_cmd == cmd)
5745             break;
5746         ie++;
5747     }
5748     arg_type = ie->arg_type;
5749     if (ie->do_ioctl) {
5750         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5751     } else if (!ie->host_cmd) {
5752         /* Some architectures define BSD ioctls in their headers
5753            that are not implemented in Linux.  */
5754         return -TARGET_ENOSYS;
5755     }
5756 
5757     switch(arg_type[0]) {
5758     case TYPE_NULL:
5759         /* no argument */
5760         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5761         break;
5762     case TYPE_PTRVOID:
5763     case TYPE_INT:
5764     case TYPE_LONG:
5765     case TYPE_ULONG:
5766         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5767         break;
5768     case TYPE_PTR:
5769         arg_type++;
5770         target_size = thunk_type_size(arg_type, 0);
5771         switch(ie->access) {
5772         case IOC_R:
5773             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5774             if (!is_error(ret)) {
5775                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5776                 if (!argptr)
5777                     return -TARGET_EFAULT;
5778                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5779                 unlock_user(argptr, arg, target_size);
5780             }
5781             break;
5782         case IOC_W:
5783             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5784             if (!argptr)
5785                 return -TARGET_EFAULT;
5786             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5787             unlock_user(argptr, arg, 0);
5788             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5789             break;
5790         default:
5791         case IOC_RW:
5792             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5793             if (!argptr)
5794                 return -TARGET_EFAULT;
5795             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5796             unlock_user(argptr, arg, 0);
5797             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5798             if (!is_error(ret)) {
5799                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5800                 if (!argptr)
5801                     return -TARGET_EFAULT;
5802                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5803                 unlock_user(argptr, arg, target_size);
5804             }
5805             break;
5806         }
5807         break;
5808     default:
5809         qemu_log_mask(LOG_UNIMP,
5810                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5811                       (long)cmd, arg_type[0]);
5812         ret = -TARGET_ENOSYS;
5813         break;
5814     }
5815     return ret;
5816 }
5817 
5818 static const bitmask_transtbl iflag_tbl[] = {
5819         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5820         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5821         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5822         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5823         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5824         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5825         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5826         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5827         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5828         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5829         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5830         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5831         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5832         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5833         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5834         { 0, 0, 0, 0 }
5835 };
5836 
5837 static const bitmask_transtbl oflag_tbl[] = {
5838 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5839 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5840 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5841 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5842 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5843 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5844 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5845 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5846 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5847 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5848 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5849 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5850 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5851 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5852 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5853 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5854 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5855 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5856 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5857 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5858 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5859 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5860 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5861 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5862 	{ 0, 0, 0, 0 }
5863 };
5864 
5865 static const bitmask_transtbl cflag_tbl[] = {
5866 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5867 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5868 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5869 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5870 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5871 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5872 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5873 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5874 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5875 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5876 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5877 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5878 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5879 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5880 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5881 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5882 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5883 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5884 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5885 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5886 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5887 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5888 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5889 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5890 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5891 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5892 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5893 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5894 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5895 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5896 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5897 	{ 0, 0, 0, 0 }
5898 };
5899 
5900 static const bitmask_transtbl lflag_tbl[] = {
5901   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5902   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5903   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5904   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5905   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5906   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5907   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5908   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5909   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5910   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5911   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5912   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5913   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5914   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5915   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5916   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5917   { 0, 0, 0, 0 }
5918 };
5919 
5920 static void target_to_host_termios (void *dst, const void *src)
5921 {
5922     struct host_termios *host = dst;
5923     const struct target_termios *target = src;
5924 
5925     host->c_iflag =
5926         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5927     host->c_oflag =
5928         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5929     host->c_cflag =
5930         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5931     host->c_lflag =
5932         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5933     host->c_line = target->c_line;
5934 
5935     memset(host->c_cc, 0, sizeof(host->c_cc));
5936     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5937     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5938     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5939     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5940     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5941     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5942     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5943     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5944     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5945     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5946     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5947     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5948     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5949     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5950     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5951     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5952     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5953 }
5954 
5955 static void host_to_target_termios (void *dst, const void *src)
5956 {
5957     struct target_termios *target = dst;
5958     const struct host_termios *host = src;
5959 
5960     target->c_iflag =
5961         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5962     target->c_oflag =
5963         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5964     target->c_cflag =
5965         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5966     target->c_lflag =
5967         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5968     target->c_line = host->c_line;
5969 
5970     memset(target->c_cc, 0, sizeof(target->c_cc));
5971     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5972     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5973     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5974     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5975     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5976     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5977     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5978     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5979     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5980     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5981     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5982     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5983     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5984     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5985     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5986     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5987     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5988 }
5989 
5990 static const StructEntry struct_termios_def = {
5991     .convert = { host_to_target_termios, target_to_host_termios },
5992     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5993     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5994     .print = print_termios,
5995 };
5996 
5997 static const bitmask_transtbl mmap_flags_tbl[] = {
5998     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5999     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6000     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6001     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6002       MAP_ANONYMOUS, MAP_ANONYMOUS },
6003     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6004       MAP_GROWSDOWN, MAP_GROWSDOWN },
6005     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6006       MAP_DENYWRITE, MAP_DENYWRITE },
6007     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6008       MAP_EXECUTABLE, MAP_EXECUTABLE },
6009     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6010     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6011       MAP_NORESERVE, MAP_NORESERVE },
6012     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6013     /* MAP_STACK had been ignored by the kernel for quite some time.
6014        Recognize it for the target insofar as we do not want to pass
6015        it through to the host.  */
6016     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6017     { 0, 0, 0, 0 }
6018 };
6019 
6020 /*
6021  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6022  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6023  */
6024 #if defined(TARGET_I386)
6025 
6026 /* NOTE: there is really one LDT for all the threads */
6027 static uint8_t *ldt_table;
6028 
6029 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6030 {
6031     int size;
6032     void *p;
6033 
6034     if (!ldt_table)
6035         return 0;
6036     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6037     if (size > bytecount)
6038         size = bytecount;
6039     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6040     if (!p)
6041         return -TARGET_EFAULT;
6042     /* ??? Should this by byteswapped?  */
6043     memcpy(p, ldt_table, size);
6044     unlock_user(p, ptr, size);
6045     return size;
6046 }
6047 
6048 /* XXX: add locking support */
6049 static abi_long write_ldt(CPUX86State *env,
6050                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6051 {
6052     struct target_modify_ldt_ldt_s ldt_info;
6053     struct target_modify_ldt_ldt_s *target_ldt_info;
6054     int seg_32bit, contents, read_exec_only, limit_in_pages;
6055     int seg_not_present, useable, lm;
6056     uint32_t *lp, entry_1, entry_2;
6057 
6058     if (bytecount != sizeof(ldt_info))
6059         return -TARGET_EINVAL;
6060     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6061         return -TARGET_EFAULT;
6062     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6063     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6064     ldt_info.limit = tswap32(target_ldt_info->limit);
6065     ldt_info.flags = tswap32(target_ldt_info->flags);
6066     unlock_user_struct(target_ldt_info, ptr, 0);
6067 
6068     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6069         return -TARGET_EINVAL;
6070     seg_32bit = ldt_info.flags & 1;
6071     contents = (ldt_info.flags >> 1) & 3;
6072     read_exec_only = (ldt_info.flags >> 3) & 1;
6073     limit_in_pages = (ldt_info.flags >> 4) & 1;
6074     seg_not_present = (ldt_info.flags >> 5) & 1;
6075     useable = (ldt_info.flags >> 6) & 1;
6076 #ifdef TARGET_ABI32
6077     lm = 0;
6078 #else
6079     lm = (ldt_info.flags >> 7) & 1;
6080 #endif
6081     if (contents == 3) {
6082         if (oldmode)
6083             return -TARGET_EINVAL;
6084         if (seg_not_present == 0)
6085             return -TARGET_EINVAL;
6086     }
6087     /* allocate the LDT */
6088     if (!ldt_table) {
6089         env->ldt.base = target_mmap(0,
6090                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6091                                     PROT_READ|PROT_WRITE,
6092                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6093         if (env->ldt.base == -1)
6094             return -TARGET_ENOMEM;
6095         memset(g2h_untagged(env->ldt.base), 0,
6096                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6097         env->ldt.limit = 0xffff;
6098         ldt_table = g2h_untagged(env->ldt.base);
6099     }
6100 
6101     /* NOTE: same code as Linux kernel */
6102     /* Allow LDTs to be cleared by the user. */
6103     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6104         if (oldmode ||
6105             (contents == 0		&&
6106              read_exec_only == 1	&&
6107              seg_32bit == 0		&&
6108              limit_in_pages == 0	&&
6109              seg_not_present == 1	&&
6110              useable == 0 )) {
6111             entry_1 = 0;
6112             entry_2 = 0;
6113             goto install;
6114         }
6115     }
6116 
6117     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6118         (ldt_info.limit & 0x0ffff);
6119     entry_2 = (ldt_info.base_addr & 0xff000000) |
6120         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6121         (ldt_info.limit & 0xf0000) |
6122         ((read_exec_only ^ 1) << 9) |
6123         (contents << 10) |
6124         ((seg_not_present ^ 1) << 15) |
6125         (seg_32bit << 22) |
6126         (limit_in_pages << 23) |
6127         (lm << 21) |
6128         0x7000;
6129     if (!oldmode)
6130         entry_2 |= (useable << 20);
6131 
6132     /* Install the new entry ...  */
6133 install:
6134     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6135     lp[0] = tswap32(entry_1);
6136     lp[1] = tswap32(entry_2);
6137     return 0;
6138 }
6139 
6140 /* specific and weird i386 syscalls */
6141 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6142                               unsigned long bytecount)
6143 {
6144     abi_long ret;
6145 
6146     switch (func) {
6147     case 0:
6148         ret = read_ldt(ptr, bytecount);
6149         break;
6150     case 1:
6151         ret = write_ldt(env, ptr, bytecount, 1);
6152         break;
6153     case 0x11:
6154         ret = write_ldt(env, ptr, bytecount, 0);
6155         break;
6156     default:
6157         ret = -TARGET_ENOSYS;
6158         break;
6159     }
6160     return ret;
6161 }
6162 
6163 #if defined(TARGET_ABI32)
6164 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6165 {
6166     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6167     struct target_modify_ldt_ldt_s ldt_info;
6168     struct target_modify_ldt_ldt_s *target_ldt_info;
6169     int seg_32bit, contents, read_exec_only, limit_in_pages;
6170     int seg_not_present, useable, lm;
6171     uint32_t *lp, entry_1, entry_2;
6172     int i;
6173 
6174     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6175     if (!target_ldt_info)
6176         return -TARGET_EFAULT;
6177     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6178     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6179     ldt_info.limit = tswap32(target_ldt_info->limit);
6180     ldt_info.flags = tswap32(target_ldt_info->flags);
6181     if (ldt_info.entry_number == -1) {
6182         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6183             if (gdt_table[i] == 0) {
6184                 ldt_info.entry_number = i;
6185                 target_ldt_info->entry_number = tswap32(i);
6186                 break;
6187             }
6188         }
6189     }
6190     unlock_user_struct(target_ldt_info, ptr, 1);
6191 
6192     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6193         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6194            return -TARGET_EINVAL;
6195     seg_32bit = ldt_info.flags & 1;
6196     contents = (ldt_info.flags >> 1) & 3;
6197     read_exec_only = (ldt_info.flags >> 3) & 1;
6198     limit_in_pages = (ldt_info.flags >> 4) & 1;
6199     seg_not_present = (ldt_info.flags >> 5) & 1;
6200     useable = (ldt_info.flags >> 6) & 1;
6201 #ifdef TARGET_ABI32
6202     lm = 0;
6203 #else
6204     lm = (ldt_info.flags >> 7) & 1;
6205 #endif
6206 
6207     if (contents == 3) {
6208         if (seg_not_present == 0)
6209             return -TARGET_EINVAL;
6210     }
6211 
6212     /* NOTE: same code as Linux kernel */
6213     /* Allow LDTs to be cleared by the user. */
6214     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6215         if ((contents == 0             &&
6216              read_exec_only == 1       &&
6217              seg_32bit == 0            &&
6218              limit_in_pages == 0       &&
6219              seg_not_present == 1      &&
6220              useable == 0 )) {
6221             entry_1 = 0;
6222             entry_2 = 0;
6223             goto install;
6224         }
6225     }
6226 
6227     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6228         (ldt_info.limit & 0x0ffff);
6229     entry_2 = (ldt_info.base_addr & 0xff000000) |
6230         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6231         (ldt_info.limit & 0xf0000) |
6232         ((read_exec_only ^ 1) << 9) |
6233         (contents << 10) |
6234         ((seg_not_present ^ 1) << 15) |
6235         (seg_32bit << 22) |
6236         (limit_in_pages << 23) |
6237         (useable << 20) |
6238         (lm << 21) |
6239         0x7000;
6240 
6241     /* Install the new entry ...  */
6242 install:
6243     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6244     lp[0] = tswap32(entry_1);
6245     lp[1] = tswap32(entry_2);
6246     return 0;
6247 }
6248 
6249 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6250 {
6251     struct target_modify_ldt_ldt_s *target_ldt_info;
6252     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6253     uint32_t base_addr, limit, flags;
6254     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6255     int seg_not_present, useable, lm;
6256     uint32_t *lp, entry_1, entry_2;
6257 
6258     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6259     if (!target_ldt_info)
6260         return -TARGET_EFAULT;
6261     idx = tswap32(target_ldt_info->entry_number);
6262     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6263         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6264         unlock_user_struct(target_ldt_info, ptr, 1);
6265         return -TARGET_EINVAL;
6266     }
6267     lp = (uint32_t *)(gdt_table + idx);
6268     entry_1 = tswap32(lp[0]);
6269     entry_2 = tswap32(lp[1]);
6270 
6271     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6272     contents = (entry_2 >> 10) & 3;
6273     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6274     seg_32bit = (entry_2 >> 22) & 1;
6275     limit_in_pages = (entry_2 >> 23) & 1;
6276     useable = (entry_2 >> 20) & 1;
6277 #ifdef TARGET_ABI32
6278     lm = 0;
6279 #else
6280     lm = (entry_2 >> 21) & 1;
6281 #endif
6282     flags = (seg_32bit << 0) | (contents << 1) |
6283         (read_exec_only << 3) | (limit_in_pages << 4) |
6284         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6285     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6286     base_addr = (entry_1 >> 16) |
6287         (entry_2 & 0xff000000) |
6288         ((entry_2 & 0xff) << 16);
6289     target_ldt_info->base_addr = tswapal(base_addr);
6290     target_ldt_info->limit = tswap32(limit);
6291     target_ldt_info->flags = tswap32(flags);
6292     unlock_user_struct(target_ldt_info, ptr, 1);
6293     return 0;
6294 }
6295 
6296 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6297 {
6298     return -TARGET_ENOSYS;
6299 }
6300 #else
6301 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6302 {
6303     abi_long ret = 0;
6304     abi_ulong val;
6305     int idx;
6306 
6307     switch(code) {
6308     case TARGET_ARCH_SET_GS:
6309     case TARGET_ARCH_SET_FS:
6310         if (code == TARGET_ARCH_SET_GS)
6311             idx = R_GS;
6312         else
6313             idx = R_FS;
6314         cpu_x86_load_seg(env, idx, 0);
6315         env->segs[idx].base = addr;
6316         break;
6317     case TARGET_ARCH_GET_GS:
6318     case TARGET_ARCH_GET_FS:
6319         if (code == TARGET_ARCH_GET_GS)
6320             idx = R_GS;
6321         else
6322             idx = R_FS;
6323         val = env->segs[idx].base;
6324         if (put_user(val, addr, abi_ulong))
6325             ret = -TARGET_EFAULT;
6326         break;
6327     default:
6328         ret = -TARGET_EINVAL;
6329         break;
6330     }
6331     return ret;
6332 }
6333 #endif /* defined(TARGET_ABI32 */
6334 #endif /* defined(TARGET_I386) */
6335 
6336 /*
6337  * These constants are generic.  Supply any that are missing from the host.
6338  */
6339 #ifndef PR_SET_NAME
6340 # define PR_SET_NAME    15
6341 # define PR_GET_NAME    16
6342 #endif
6343 #ifndef PR_SET_FP_MODE
6344 # define PR_SET_FP_MODE 45
6345 # define PR_GET_FP_MODE 46
6346 # define PR_FP_MODE_FR   (1 << 0)
6347 # define PR_FP_MODE_FRE  (1 << 1)
6348 #endif
6349 #ifndef PR_SVE_SET_VL
6350 # define PR_SVE_SET_VL  50
6351 # define PR_SVE_GET_VL  51
6352 # define PR_SVE_VL_LEN_MASK  0xffff
6353 # define PR_SVE_VL_INHERIT   (1 << 17)
6354 #endif
6355 #ifndef PR_PAC_RESET_KEYS
6356 # define PR_PAC_RESET_KEYS  54
6357 # define PR_PAC_APIAKEY   (1 << 0)
6358 # define PR_PAC_APIBKEY   (1 << 1)
6359 # define PR_PAC_APDAKEY   (1 << 2)
6360 # define PR_PAC_APDBKEY   (1 << 3)
6361 # define PR_PAC_APGAKEY   (1 << 4)
6362 #endif
6363 #ifndef PR_SET_TAGGED_ADDR_CTRL
6364 # define PR_SET_TAGGED_ADDR_CTRL 55
6365 # define PR_GET_TAGGED_ADDR_CTRL 56
6366 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6367 #endif
6368 #ifndef PR_MTE_TCF_SHIFT
6369 # define PR_MTE_TCF_SHIFT       1
6370 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6371 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6372 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6373 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6374 # define PR_MTE_TAG_SHIFT       3
6375 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6376 #endif
6377 #ifndef PR_SET_IO_FLUSHER
6378 # define PR_SET_IO_FLUSHER 57
6379 # define PR_GET_IO_FLUSHER 58
6380 #endif
6381 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6382 # define PR_SET_SYSCALL_USER_DISPATCH 59
6383 #endif
6384 
6385 #include "target_prctl.h"
6386 
6387 static abi_long do_prctl_inval0(CPUArchState *env)
6388 {
6389     return -TARGET_EINVAL;
6390 }
6391 
6392 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6393 {
6394     return -TARGET_EINVAL;
6395 }
6396 
6397 #ifndef do_prctl_get_fp_mode
6398 #define do_prctl_get_fp_mode do_prctl_inval0
6399 #endif
6400 #ifndef do_prctl_set_fp_mode
6401 #define do_prctl_set_fp_mode do_prctl_inval1
6402 #endif
6403 #ifndef do_prctl_get_vl
6404 #define do_prctl_get_vl do_prctl_inval0
6405 #endif
6406 #ifndef do_prctl_set_vl
6407 #define do_prctl_set_vl do_prctl_inval1
6408 #endif
6409 #ifndef do_prctl_reset_keys
6410 #define do_prctl_reset_keys do_prctl_inval1
6411 #endif
6412 #ifndef do_prctl_set_tagged_addr_ctrl
6413 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6414 #endif
6415 #ifndef do_prctl_get_tagged_addr_ctrl
6416 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6417 #endif
6418 #ifndef do_prctl_get_unalign
6419 #define do_prctl_get_unalign do_prctl_inval1
6420 #endif
6421 #ifndef do_prctl_set_unalign
6422 #define do_prctl_set_unalign do_prctl_inval1
6423 #endif
6424 
6425 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6426                          abi_long arg3, abi_long arg4, abi_long arg5)
6427 {
6428     abi_long ret;
6429 
6430     switch (option) {
6431     case PR_GET_PDEATHSIG:
6432         {
6433             int deathsig;
6434             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6435                                   arg3, arg4, arg5));
6436             if (!is_error(ret) && arg2 && put_user_s32(deathsig, arg2)) {
6437                 return -TARGET_EFAULT;
6438             }
6439             return ret;
6440         }
6441     case PR_GET_NAME:
6442         {
6443             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6444             if (!name) {
6445                 return -TARGET_EFAULT;
6446             }
6447             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6448                                   arg3, arg4, arg5));
6449             unlock_user(name, arg2, 16);
6450             return ret;
6451         }
6452     case PR_SET_NAME:
6453         {
6454             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6455             if (!name) {
6456                 return -TARGET_EFAULT;
6457             }
6458             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6459                                   arg3, arg4, arg5));
6460             unlock_user(name, arg2, 0);
6461             return ret;
6462         }
6463     case PR_GET_FP_MODE:
6464         return do_prctl_get_fp_mode(env);
6465     case PR_SET_FP_MODE:
6466         return do_prctl_set_fp_mode(env, arg2);
6467     case PR_SVE_GET_VL:
6468         return do_prctl_get_vl(env);
6469     case PR_SVE_SET_VL:
6470         return do_prctl_set_vl(env, arg2);
6471     case PR_PAC_RESET_KEYS:
6472         if (arg3 || arg4 || arg5) {
6473             return -TARGET_EINVAL;
6474         }
6475         return do_prctl_reset_keys(env, arg2);
6476     case PR_SET_TAGGED_ADDR_CTRL:
6477         if (arg3 || arg4 || arg5) {
6478             return -TARGET_EINVAL;
6479         }
6480         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6481     case PR_GET_TAGGED_ADDR_CTRL:
6482         if (arg2 || arg3 || arg4 || arg5) {
6483             return -TARGET_EINVAL;
6484         }
6485         return do_prctl_get_tagged_addr_ctrl(env);
6486 
6487     case PR_GET_UNALIGN:
6488         return do_prctl_get_unalign(env, arg2);
6489     case PR_SET_UNALIGN:
6490         return do_prctl_set_unalign(env, arg2);
6491 
6492     case PR_GET_DUMPABLE:
6493     case PR_SET_DUMPABLE:
6494     case PR_GET_KEEPCAPS:
6495     case PR_SET_KEEPCAPS:
6496     case PR_GET_TIMING:
6497     case PR_SET_TIMING:
6498     case PR_GET_TIMERSLACK:
6499     case PR_SET_TIMERSLACK:
6500     case PR_MCE_KILL:
6501     case PR_MCE_KILL_GET:
6502     case PR_GET_NO_NEW_PRIVS:
6503     case PR_SET_NO_NEW_PRIVS:
6504     case PR_GET_IO_FLUSHER:
6505     case PR_SET_IO_FLUSHER:
6506         /* Some prctl options have no pointer arguments and we can pass on. */
6507         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6508 
6509     case PR_GET_CHILD_SUBREAPER:
6510     case PR_SET_CHILD_SUBREAPER:
6511     case PR_GET_SPECULATION_CTRL:
6512     case PR_SET_SPECULATION_CTRL:
6513     case PR_GET_TID_ADDRESS:
6514         /* TODO */
6515         return -TARGET_EINVAL;
6516 
6517     case PR_GET_FPEXC:
6518     case PR_SET_FPEXC:
6519         /* Was used for SPE on PowerPC. */
6520         return -TARGET_EINVAL;
6521 
6522     case PR_GET_ENDIAN:
6523     case PR_SET_ENDIAN:
6524     case PR_GET_FPEMU:
6525     case PR_SET_FPEMU:
6526     case PR_SET_MM:
6527     case PR_GET_SECCOMP:
6528     case PR_SET_SECCOMP:
6529     case PR_SET_SYSCALL_USER_DISPATCH:
6530     case PR_GET_THP_DISABLE:
6531     case PR_SET_THP_DISABLE:
6532     case PR_GET_TSC:
6533     case PR_SET_TSC:
6534         /* Disable to prevent the target disabling stuff we need. */
6535         return -TARGET_EINVAL;
6536 
6537     default:
6538         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6539                       option);
6540         return -TARGET_EINVAL;
6541     }
6542 }
6543 
6544 #define NEW_STACK_SIZE 0x40000
6545 
6546 
6547 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6548 typedef struct {
6549     CPUArchState *env;
6550     pthread_mutex_t mutex;
6551     pthread_cond_t cond;
6552     pthread_t thread;
6553     uint32_t tid;
6554     abi_ulong child_tidptr;
6555     abi_ulong parent_tidptr;
6556     sigset_t sigmask;
6557 } new_thread_info;
6558 
6559 static void *clone_func(void *arg)
6560 {
6561     new_thread_info *info = arg;
6562     CPUArchState *env;
6563     CPUState *cpu;
6564     TaskState *ts;
6565 
6566     rcu_register_thread();
6567     tcg_register_thread();
6568     env = info->env;
6569     cpu = env_cpu(env);
6570     thread_cpu = cpu;
6571     ts = (TaskState *)cpu->opaque;
6572     info->tid = sys_gettid();
6573     task_settid(ts);
6574     if (info->child_tidptr)
6575         put_user_u32(info->tid, info->child_tidptr);
6576     if (info->parent_tidptr)
6577         put_user_u32(info->tid, info->parent_tidptr);
6578     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6579     /* Enable signals.  */
6580     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6581     /* Signal to the parent that we're ready.  */
6582     pthread_mutex_lock(&info->mutex);
6583     pthread_cond_broadcast(&info->cond);
6584     pthread_mutex_unlock(&info->mutex);
6585     /* Wait until the parent has finished initializing the tls state.  */
6586     pthread_mutex_lock(&clone_lock);
6587     pthread_mutex_unlock(&clone_lock);
6588     cpu_loop(env);
6589     /* never exits */
6590     return NULL;
6591 }
6592 
6593 /* do_fork() Must return host values and target errnos (unlike most
6594    do_*() functions). */
6595 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6596                    abi_ulong parent_tidptr, target_ulong newtls,
6597                    abi_ulong child_tidptr)
6598 {
6599     CPUState *cpu = env_cpu(env);
6600     int ret;
6601     TaskState *ts;
6602     CPUState *new_cpu;
6603     CPUArchState *new_env;
6604     sigset_t sigmask;
6605 
6606     flags &= ~CLONE_IGNORED_FLAGS;
6607 
6608     /* Emulate vfork() with fork() */
6609     if (flags & CLONE_VFORK)
6610         flags &= ~(CLONE_VFORK | CLONE_VM);
6611 
6612     if (flags & CLONE_VM) {
6613         TaskState *parent_ts = (TaskState *)cpu->opaque;
6614         new_thread_info info;
6615         pthread_attr_t attr;
6616 
6617         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6618             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6619             return -TARGET_EINVAL;
6620         }
6621 
6622         ts = g_new0(TaskState, 1);
6623         init_task_state(ts);
6624 
6625         /* Grab a mutex so that thread setup appears atomic.  */
6626         pthread_mutex_lock(&clone_lock);
6627 
6628         /*
6629          * If this is our first additional thread, we need to ensure we
6630          * generate code for parallel execution and flush old translations.
6631          * Do this now so that the copy gets CF_PARALLEL too.
6632          */
6633         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6634             cpu->tcg_cflags |= CF_PARALLEL;
6635             tb_flush(cpu);
6636         }
6637 
6638         /* we create a new CPU instance. */
6639         new_env = cpu_copy(env);
6640         /* Init regs that differ from the parent.  */
6641         cpu_clone_regs_child(new_env, newsp, flags);
6642         cpu_clone_regs_parent(env, flags);
6643         new_cpu = env_cpu(new_env);
6644         new_cpu->opaque = ts;
6645         ts->bprm = parent_ts->bprm;
6646         ts->info = parent_ts->info;
6647         ts->signal_mask = parent_ts->signal_mask;
6648 
6649         if (flags & CLONE_CHILD_CLEARTID) {
6650             ts->child_tidptr = child_tidptr;
6651         }
6652 
6653         if (flags & CLONE_SETTLS) {
6654             cpu_set_tls (new_env, newtls);
6655         }
6656 
6657         memset(&info, 0, sizeof(info));
6658         pthread_mutex_init(&info.mutex, NULL);
6659         pthread_mutex_lock(&info.mutex);
6660         pthread_cond_init(&info.cond, NULL);
6661         info.env = new_env;
6662         if (flags & CLONE_CHILD_SETTID) {
6663             info.child_tidptr = child_tidptr;
6664         }
6665         if (flags & CLONE_PARENT_SETTID) {
6666             info.parent_tidptr = parent_tidptr;
6667         }
6668 
6669         ret = pthread_attr_init(&attr);
6670         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6671         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6672         /* It is not safe to deliver signals until the child has finished
6673            initializing, so temporarily block all signals.  */
6674         sigfillset(&sigmask);
6675         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6676         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6677 
6678         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6679         /* TODO: Free new CPU state if thread creation failed.  */
6680 
6681         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6682         pthread_attr_destroy(&attr);
6683         if (ret == 0) {
6684             /* Wait for the child to initialize.  */
6685             pthread_cond_wait(&info.cond, &info.mutex);
6686             ret = info.tid;
6687         } else {
6688             ret = -1;
6689         }
6690         pthread_mutex_unlock(&info.mutex);
6691         pthread_cond_destroy(&info.cond);
6692         pthread_mutex_destroy(&info.mutex);
6693         pthread_mutex_unlock(&clone_lock);
6694     } else {
6695         /* if no CLONE_VM, we consider it is a fork */
6696         if (flags & CLONE_INVALID_FORK_FLAGS) {
6697             return -TARGET_EINVAL;
6698         }
6699 
6700         /* We can't support custom termination signals */
6701         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6702             return -TARGET_EINVAL;
6703         }
6704 
6705         if (block_signals()) {
6706             return -QEMU_ERESTARTSYS;
6707         }
6708 
6709         fork_start();
6710         ret = fork();
6711         if (ret == 0) {
6712             /* Child Process.  */
6713             cpu_clone_regs_child(env, newsp, flags);
6714             fork_end(1);
6715             /* There is a race condition here.  The parent process could
6716                theoretically read the TID in the child process before the child
6717                tid is set.  This would require using either ptrace
6718                (not implemented) or having *_tidptr to point at a shared memory
6719                mapping.  We can't repeat the spinlock hack used above because
6720                the child process gets its own copy of the lock.  */
6721             if (flags & CLONE_CHILD_SETTID)
6722                 put_user_u32(sys_gettid(), child_tidptr);
6723             if (flags & CLONE_PARENT_SETTID)
6724                 put_user_u32(sys_gettid(), parent_tidptr);
6725             ts = (TaskState *)cpu->opaque;
6726             if (flags & CLONE_SETTLS)
6727                 cpu_set_tls (env, newtls);
6728             if (flags & CLONE_CHILD_CLEARTID)
6729                 ts->child_tidptr = child_tidptr;
6730         } else {
6731             cpu_clone_regs_parent(env, flags);
6732             fork_end(0);
6733         }
6734     }
6735     return ret;
6736 }
6737 
6738 /* warning : doesn't handle linux specific flags... */
6739 static int target_to_host_fcntl_cmd(int cmd)
6740 {
6741     int ret;
6742 
6743     switch(cmd) {
6744     case TARGET_F_DUPFD:
6745     case TARGET_F_GETFD:
6746     case TARGET_F_SETFD:
6747     case TARGET_F_GETFL:
6748     case TARGET_F_SETFL:
6749     case TARGET_F_OFD_GETLK:
6750     case TARGET_F_OFD_SETLK:
6751     case TARGET_F_OFD_SETLKW:
6752         ret = cmd;
6753         break;
6754     case TARGET_F_GETLK:
6755         ret = F_GETLK64;
6756         break;
6757     case TARGET_F_SETLK:
6758         ret = F_SETLK64;
6759         break;
6760     case TARGET_F_SETLKW:
6761         ret = F_SETLKW64;
6762         break;
6763     case TARGET_F_GETOWN:
6764         ret = F_GETOWN;
6765         break;
6766     case TARGET_F_SETOWN:
6767         ret = F_SETOWN;
6768         break;
6769     case TARGET_F_GETSIG:
6770         ret = F_GETSIG;
6771         break;
6772     case TARGET_F_SETSIG:
6773         ret = F_SETSIG;
6774         break;
6775 #if TARGET_ABI_BITS == 32
6776     case TARGET_F_GETLK64:
6777         ret = F_GETLK64;
6778         break;
6779     case TARGET_F_SETLK64:
6780         ret = F_SETLK64;
6781         break;
6782     case TARGET_F_SETLKW64:
6783         ret = F_SETLKW64;
6784         break;
6785 #endif
6786     case TARGET_F_SETLEASE:
6787         ret = F_SETLEASE;
6788         break;
6789     case TARGET_F_GETLEASE:
6790         ret = F_GETLEASE;
6791         break;
6792 #ifdef F_DUPFD_CLOEXEC
6793     case TARGET_F_DUPFD_CLOEXEC:
6794         ret = F_DUPFD_CLOEXEC;
6795         break;
6796 #endif
6797     case TARGET_F_NOTIFY:
6798         ret = F_NOTIFY;
6799         break;
6800 #ifdef F_GETOWN_EX
6801     case TARGET_F_GETOWN_EX:
6802         ret = F_GETOWN_EX;
6803         break;
6804 #endif
6805 #ifdef F_SETOWN_EX
6806     case TARGET_F_SETOWN_EX:
6807         ret = F_SETOWN_EX;
6808         break;
6809 #endif
6810 #ifdef F_SETPIPE_SZ
6811     case TARGET_F_SETPIPE_SZ:
6812         ret = F_SETPIPE_SZ;
6813         break;
6814     case TARGET_F_GETPIPE_SZ:
6815         ret = F_GETPIPE_SZ;
6816         break;
6817 #endif
6818 #ifdef F_ADD_SEALS
6819     case TARGET_F_ADD_SEALS:
6820         ret = F_ADD_SEALS;
6821         break;
6822     case TARGET_F_GET_SEALS:
6823         ret = F_GET_SEALS;
6824         break;
6825 #endif
6826     default:
6827         ret = -TARGET_EINVAL;
6828         break;
6829     }
6830 
6831 #if defined(__powerpc64__)
6832     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6833      * is not supported by kernel. The glibc fcntl call actually adjusts
6834      * them to 5, 6 and 7 before making the syscall(). Since we make the
6835      * syscall directly, adjust to what is supported by the kernel.
6836      */
6837     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6838         ret -= F_GETLK64 - 5;
6839     }
6840 #endif
6841 
6842     return ret;
6843 }
6844 
6845 #define FLOCK_TRANSTBL \
6846     switch (type) { \
6847     TRANSTBL_CONVERT(F_RDLCK); \
6848     TRANSTBL_CONVERT(F_WRLCK); \
6849     TRANSTBL_CONVERT(F_UNLCK); \
6850     }
6851 
6852 static int target_to_host_flock(int type)
6853 {
6854 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6855     FLOCK_TRANSTBL
6856 #undef  TRANSTBL_CONVERT
6857     return -TARGET_EINVAL;
6858 }
6859 
6860 static int host_to_target_flock(int type)
6861 {
6862 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6863     FLOCK_TRANSTBL
6864 #undef  TRANSTBL_CONVERT
6865     /* if we don't know how to convert the value coming
6866      * from the host we copy to the target field as-is
6867      */
6868     return type;
6869 }
6870 
6871 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6872                                             abi_ulong target_flock_addr)
6873 {
6874     struct target_flock *target_fl;
6875     int l_type;
6876 
6877     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6878         return -TARGET_EFAULT;
6879     }
6880 
6881     __get_user(l_type, &target_fl->l_type);
6882     l_type = target_to_host_flock(l_type);
6883     if (l_type < 0) {
6884         return l_type;
6885     }
6886     fl->l_type = l_type;
6887     __get_user(fl->l_whence, &target_fl->l_whence);
6888     __get_user(fl->l_start, &target_fl->l_start);
6889     __get_user(fl->l_len, &target_fl->l_len);
6890     __get_user(fl->l_pid, &target_fl->l_pid);
6891     unlock_user_struct(target_fl, target_flock_addr, 0);
6892     return 0;
6893 }
6894 
6895 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6896                                           const struct flock64 *fl)
6897 {
6898     struct target_flock *target_fl;
6899     short l_type;
6900 
6901     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6902         return -TARGET_EFAULT;
6903     }
6904 
6905     l_type = host_to_target_flock(fl->l_type);
6906     __put_user(l_type, &target_fl->l_type);
6907     __put_user(fl->l_whence, &target_fl->l_whence);
6908     __put_user(fl->l_start, &target_fl->l_start);
6909     __put_user(fl->l_len, &target_fl->l_len);
6910     __put_user(fl->l_pid, &target_fl->l_pid);
6911     unlock_user_struct(target_fl, target_flock_addr, 1);
6912     return 0;
6913 }
6914 
6915 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6916 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6917 
6918 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6919 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6920                                                    abi_ulong target_flock_addr)
6921 {
6922     struct target_oabi_flock64 *target_fl;
6923     int l_type;
6924 
6925     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6926         return -TARGET_EFAULT;
6927     }
6928 
6929     __get_user(l_type, &target_fl->l_type);
6930     l_type = target_to_host_flock(l_type);
6931     if (l_type < 0) {
6932         return l_type;
6933     }
6934     fl->l_type = l_type;
6935     __get_user(fl->l_whence, &target_fl->l_whence);
6936     __get_user(fl->l_start, &target_fl->l_start);
6937     __get_user(fl->l_len, &target_fl->l_len);
6938     __get_user(fl->l_pid, &target_fl->l_pid);
6939     unlock_user_struct(target_fl, target_flock_addr, 0);
6940     return 0;
6941 }
6942 
6943 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6944                                                  const struct flock64 *fl)
6945 {
6946     struct target_oabi_flock64 *target_fl;
6947     short l_type;
6948 
6949     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6950         return -TARGET_EFAULT;
6951     }
6952 
6953     l_type = host_to_target_flock(fl->l_type);
6954     __put_user(l_type, &target_fl->l_type);
6955     __put_user(fl->l_whence, &target_fl->l_whence);
6956     __put_user(fl->l_start, &target_fl->l_start);
6957     __put_user(fl->l_len, &target_fl->l_len);
6958     __put_user(fl->l_pid, &target_fl->l_pid);
6959     unlock_user_struct(target_fl, target_flock_addr, 1);
6960     return 0;
6961 }
6962 #endif
6963 
6964 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6965                                               abi_ulong target_flock_addr)
6966 {
6967     struct target_flock64 *target_fl;
6968     int l_type;
6969 
6970     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6971         return -TARGET_EFAULT;
6972     }
6973 
6974     __get_user(l_type, &target_fl->l_type);
6975     l_type = target_to_host_flock(l_type);
6976     if (l_type < 0) {
6977         return l_type;
6978     }
6979     fl->l_type = l_type;
6980     __get_user(fl->l_whence, &target_fl->l_whence);
6981     __get_user(fl->l_start, &target_fl->l_start);
6982     __get_user(fl->l_len, &target_fl->l_len);
6983     __get_user(fl->l_pid, &target_fl->l_pid);
6984     unlock_user_struct(target_fl, target_flock_addr, 0);
6985     return 0;
6986 }
6987 
6988 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6989                                             const struct flock64 *fl)
6990 {
6991     struct target_flock64 *target_fl;
6992     short l_type;
6993 
6994     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6995         return -TARGET_EFAULT;
6996     }
6997 
6998     l_type = host_to_target_flock(fl->l_type);
6999     __put_user(l_type, &target_fl->l_type);
7000     __put_user(fl->l_whence, &target_fl->l_whence);
7001     __put_user(fl->l_start, &target_fl->l_start);
7002     __put_user(fl->l_len, &target_fl->l_len);
7003     __put_user(fl->l_pid, &target_fl->l_pid);
7004     unlock_user_struct(target_fl, target_flock_addr, 1);
7005     return 0;
7006 }
7007 
7008 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7009 {
7010     struct flock64 fl64;
7011 #ifdef F_GETOWN_EX
7012     struct f_owner_ex fox;
7013     struct target_f_owner_ex *target_fox;
7014 #endif
7015     abi_long ret;
7016     int host_cmd = target_to_host_fcntl_cmd(cmd);
7017 
7018     if (host_cmd == -TARGET_EINVAL)
7019 	    return host_cmd;
7020 
7021     switch(cmd) {
7022     case TARGET_F_GETLK:
7023         ret = copy_from_user_flock(&fl64, arg);
7024         if (ret) {
7025             return ret;
7026         }
7027         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7028         if (ret == 0) {
7029             ret = copy_to_user_flock(arg, &fl64);
7030         }
7031         break;
7032 
7033     case TARGET_F_SETLK:
7034     case TARGET_F_SETLKW:
7035         ret = copy_from_user_flock(&fl64, arg);
7036         if (ret) {
7037             return ret;
7038         }
7039         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7040         break;
7041 
7042     case TARGET_F_GETLK64:
7043     case TARGET_F_OFD_GETLK:
7044         ret = copy_from_user_flock64(&fl64, arg);
7045         if (ret) {
7046             return ret;
7047         }
7048         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7049         if (ret == 0) {
7050             ret = copy_to_user_flock64(arg, &fl64);
7051         }
7052         break;
7053     case TARGET_F_SETLK64:
7054     case TARGET_F_SETLKW64:
7055     case TARGET_F_OFD_SETLK:
7056     case TARGET_F_OFD_SETLKW:
7057         ret = copy_from_user_flock64(&fl64, arg);
7058         if (ret) {
7059             return ret;
7060         }
7061         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7062         break;
7063 
7064     case TARGET_F_GETFL:
7065         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7066         if (ret >= 0) {
7067             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7068         }
7069         break;
7070 
7071     case TARGET_F_SETFL:
7072         ret = get_errno(safe_fcntl(fd, host_cmd,
7073                                    target_to_host_bitmask(arg,
7074                                                           fcntl_flags_tbl)));
7075         break;
7076 
7077 #ifdef F_GETOWN_EX
7078     case TARGET_F_GETOWN_EX:
7079         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7080         if (ret >= 0) {
7081             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7082                 return -TARGET_EFAULT;
7083             target_fox->type = tswap32(fox.type);
7084             target_fox->pid = tswap32(fox.pid);
7085             unlock_user_struct(target_fox, arg, 1);
7086         }
7087         break;
7088 #endif
7089 
7090 #ifdef F_SETOWN_EX
7091     case TARGET_F_SETOWN_EX:
7092         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7093             return -TARGET_EFAULT;
7094         fox.type = tswap32(target_fox->type);
7095         fox.pid = tswap32(target_fox->pid);
7096         unlock_user_struct(target_fox, arg, 0);
7097         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7098         break;
7099 #endif
7100 
7101     case TARGET_F_SETSIG:
7102         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7103         break;
7104 
7105     case TARGET_F_GETSIG:
7106         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7107         break;
7108 
7109     case TARGET_F_SETOWN:
7110     case TARGET_F_GETOWN:
7111     case TARGET_F_SETLEASE:
7112     case TARGET_F_GETLEASE:
7113     case TARGET_F_SETPIPE_SZ:
7114     case TARGET_F_GETPIPE_SZ:
7115     case TARGET_F_ADD_SEALS:
7116     case TARGET_F_GET_SEALS:
7117         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7118         break;
7119 
7120     default:
7121         ret = get_errno(safe_fcntl(fd, cmd, arg));
7122         break;
7123     }
7124     return ret;
7125 }
7126 
7127 #ifdef USE_UID16
7128 
7129 static inline int high2lowuid(int uid)
7130 {
7131     if (uid > 65535)
7132         return 65534;
7133     else
7134         return uid;
7135 }
7136 
7137 static inline int high2lowgid(int gid)
7138 {
7139     if (gid > 65535)
7140         return 65534;
7141     else
7142         return gid;
7143 }
7144 
7145 static inline int low2highuid(int uid)
7146 {
7147     if ((int16_t)uid == -1)
7148         return -1;
7149     else
7150         return uid;
7151 }
7152 
7153 static inline int low2highgid(int gid)
7154 {
7155     if ((int16_t)gid == -1)
7156         return -1;
7157     else
7158         return gid;
7159 }
7160 static inline int tswapid(int id)
7161 {
7162     return tswap16(id);
7163 }
7164 
7165 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7166 
7167 #else /* !USE_UID16 */
7168 static inline int high2lowuid(int uid)
7169 {
7170     return uid;
7171 }
7172 static inline int high2lowgid(int gid)
7173 {
7174     return gid;
7175 }
7176 static inline int low2highuid(int uid)
7177 {
7178     return uid;
7179 }
7180 static inline int low2highgid(int gid)
7181 {
7182     return gid;
7183 }
7184 static inline int tswapid(int id)
7185 {
7186     return tswap32(id);
7187 }
7188 
7189 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7190 
7191 #endif /* USE_UID16 */
7192 
7193 /* We must do direct syscalls for setting UID/GID, because we want to
7194  * implement the Linux system call semantics of "change only for this thread",
7195  * not the libc/POSIX semantics of "change for all threads in process".
7196  * (See http://ewontfix.com/17/ for more details.)
7197  * We use the 32-bit version of the syscalls if present; if it is not
7198  * then either the host architecture supports 32-bit UIDs natively with
7199  * the standard syscall, or the 16-bit UID is the best we can do.
7200  */
7201 #ifdef __NR_setuid32
7202 #define __NR_sys_setuid __NR_setuid32
7203 #else
7204 #define __NR_sys_setuid __NR_setuid
7205 #endif
7206 #ifdef __NR_setgid32
7207 #define __NR_sys_setgid __NR_setgid32
7208 #else
7209 #define __NR_sys_setgid __NR_setgid
7210 #endif
7211 #ifdef __NR_setresuid32
7212 #define __NR_sys_setresuid __NR_setresuid32
7213 #else
7214 #define __NR_sys_setresuid __NR_setresuid
7215 #endif
7216 #ifdef __NR_setresgid32
7217 #define __NR_sys_setresgid __NR_setresgid32
7218 #else
7219 #define __NR_sys_setresgid __NR_setresgid
7220 #endif
7221 
7222 _syscall1(int, sys_setuid, uid_t, uid)
7223 _syscall1(int, sys_setgid, gid_t, gid)
7224 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7225 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7226 
7227 void syscall_init(void)
7228 {
7229     IOCTLEntry *ie;
7230     const argtype *arg_type;
7231     int size;
7232 
7233     thunk_init(STRUCT_MAX);
7234 
7235 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7236 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7237 #include "syscall_types.h"
7238 #undef STRUCT
7239 #undef STRUCT_SPECIAL
7240 
7241     /* we patch the ioctl size if necessary. We rely on the fact that
7242        no ioctl has all the bits at '1' in the size field */
7243     ie = ioctl_entries;
7244     while (ie->target_cmd != 0) {
7245         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7246             TARGET_IOC_SIZEMASK) {
7247             arg_type = ie->arg_type;
7248             if (arg_type[0] != TYPE_PTR) {
7249                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7250                         ie->target_cmd);
7251                 exit(1);
7252             }
7253             arg_type++;
7254             size = thunk_type_size(arg_type, 0);
7255             ie->target_cmd = (ie->target_cmd &
7256                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7257                 (size << TARGET_IOC_SIZESHIFT);
7258         }
7259 
7260         /* automatic consistency check if same arch */
7261 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7262     (defined(__x86_64__) && defined(TARGET_X86_64))
7263         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7264             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7265                     ie->name, ie->target_cmd, ie->host_cmd);
7266         }
7267 #endif
7268         ie++;
7269     }
7270 }
7271 
7272 #ifdef TARGET_NR_truncate64
7273 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7274                                          abi_long arg2,
7275                                          abi_long arg3,
7276                                          abi_long arg4)
7277 {
7278     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7279         arg2 = arg3;
7280         arg3 = arg4;
7281     }
7282     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7283 }
7284 #endif
7285 
7286 #ifdef TARGET_NR_ftruncate64
7287 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7288                                           abi_long arg2,
7289                                           abi_long arg3,
7290                                           abi_long arg4)
7291 {
7292     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7293         arg2 = arg3;
7294         arg3 = arg4;
7295     }
7296     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7297 }
7298 #endif
7299 
7300 #if defined(TARGET_NR_timer_settime) || \
7301     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7302 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7303                                                  abi_ulong target_addr)
7304 {
7305     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7306                                 offsetof(struct target_itimerspec,
7307                                          it_interval)) ||
7308         target_to_host_timespec(&host_its->it_value, target_addr +
7309                                 offsetof(struct target_itimerspec,
7310                                          it_value))) {
7311         return -TARGET_EFAULT;
7312     }
7313 
7314     return 0;
7315 }
7316 #endif
7317 
7318 #if defined(TARGET_NR_timer_settime64) || \
7319     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7320 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7321                                                    abi_ulong target_addr)
7322 {
7323     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7324                                   offsetof(struct target__kernel_itimerspec,
7325                                            it_interval)) ||
7326         target_to_host_timespec64(&host_its->it_value, target_addr +
7327                                   offsetof(struct target__kernel_itimerspec,
7328                                            it_value))) {
7329         return -TARGET_EFAULT;
7330     }
7331 
7332     return 0;
7333 }
7334 #endif
7335 
7336 #if ((defined(TARGET_NR_timerfd_gettime) || \
7337       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7338       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7339 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7340                                                  struct itimerspec *host_its)
7341 {
7342     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7343                                                        it_interval),
7344                                 &host_its->it_interval) ||
7345         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7346                                                        it_value),
7347                                 &host_its->it_value)) {
7348         return -TARGET_EFAULT;
7349     }
7350     return 0;
7351 }
7352 #endif
7353 
7354 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7355       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7356       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7357 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7358                                                    struct itimerspec *host_its)
7359 {
7360     if (host_to_target_timespec64(target_addr +
7361                                   offsetof(struct target__kernel_itimerspec,
7362                                            it_interval),
7363                                   &host_its->it_interval) ||
7364         host_to_target_timespec64(target_addr +
7365                                   offsetof(struct target__kernel_itimerspec,
7366                                            it_value),
7367                                   &host_its->it_value)) {
7368         return -TARGET_EFAULT;
7369     }
7370     return 0;
7371 }
7372 #endif
7373 
7374 #if defined(TARGET_NR_adjtimex) || \
7375     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7376 static inline abi_long target_to_host_timex(struct timex *host_tx,
7377                                             abi_long target_addr)
7378 {
7379     struct target_timex *target_tx;
7380 
7381     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7382         return -TARGET_EFAULT;
7383     }
7384 
7385     __get_user(host_tx->modes, &target_tx->modes);
7386     __get_user(host_tx->offset, &target_tx->offset);
7387     __get_user(host_tx->freq, &target_tx->freq);
7388     __get_user(host_tx->maxerror, &target_tx->maxerror);
7389     __get_user(host_tx->esterror, &target_tx->esterror);
7390     __get_user(host_tx->status, &target_tx->status);
7391     __get_user(host_tx->constant, &target_tx->constant);
7392     __get_user(host_tx->precision, &target_tx->precision);
7393     __get_user(host_tx->tolerance, &target_tx->tolerance);
7394     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7395     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7396     __get_user(host_tx->tick, &target_tx->tick);
7397     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7398     __get_user(host_tx->jitter, &target_tx->jitter);
7399     __get_user(host_tx->shift, &target_tx->shift);
7400     __get_user(host_tx->stabil, &target_tx->stabil);
7401     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7402     __get_user(host_tx->calcnt, &target_tx->calcnt);
7403     __get_user(host_tx->errcnt, &target_tx->errcnt);
7404     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7405     __get_user(host_tx->tai, &target_tx->tai);
7406 
7407     unlock_user_struct(target_tx, target_addr, 0);
7408     return 0;
7409 }
7410 
7411 static inline abi_long host_to_target_timex(abi_long target_addr,
7412                                             struct timex *host_tx)
7413 {
7414     struct target_timex *target_tx;
7415 
7416     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7417         return -TARGET_EFAULT;
7418     }
7419 
7420     __put_user(host_tx->modes, &target_tx->modes);
7421     __put_user(host_tx->offset, &target_tx->offset);
7422     __put_user(host_tx->freq, &target_tx->freq);
7423     __put_user(host_tx->maxerror, &target_tx->maxerror);
7424     __put_user(host_tx->esterror, &target_tx->esterror);
7425     __put_user(host_tx->status, &target_tx->status);
7426     __put_user(host_tx->constant, &target_tx->constant);
7427     __put_user(host_tx->precision, &target_tx->precision);
7428     __put_user(host_tx->tolerance, &target_tx->tolerance);
7429     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7430     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7431     __put_user(host_tx->tick, &target_tx->tick);
7432     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7433     __put_user(host_tx->jitter, &target_tx->jitter);
7434     __put_user(host_tx->shift, &target_tx->shift);
7435     __put_user(host_tx->stabil, &target_tx->stabil);
7436     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7437     __put_user(host_tx->calcnt, &target_tx->calcnt);
7438     __put_user(host_tx->errcnt, &target_tx->errcnt);
7439     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7440     __put_user(host_tx->tai, &target_tx->tai);
7441 
7442     unlock_user_struct(target_tx, target_addr, 1);
7443     return 0;
7444 }
7445 #endif
7446 
7447 
7448 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7449 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7450                                               abi_long target_addr)
7451 {
7452     struct target__kernel_timex *target_tx;
7453 
7454     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7455                                  offsetof(struct target__kernel_timex,
7456                                           time))) {
7457         return -TARGET_EFAULT;
7458     }
7459 
7460     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7461         return -TARGET_EFAULT;
7462     }
7463 
7464     __get_user(host_tx->modes, &target_tx->modes);
7465     __get_user(host_tx->offset, &target_tx->offset);
7466     __get_user(host_tx->freq, &target_tx->freq);
7467     __get_user(host_tx->maxerror, &target_tx->maxerror);
7468     __get_user(host_tx->esterror, &target_tx->esterror);
7469     __get_user(host_tx->status, &target_tx->status);
7470     __get_user(host_tx->constant, &target_tx->constant);
7471     __get_user(host_tx->precision, &target_tx->precision);
7472     __get_user(host_tx->tolerance, &target_tx->tolerance);
7473     __get_user(host_tx->tick, &target_tx->tick);
7474     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7475     __get_user(host_tx->jitter, &target_tx->jitter);
7476     __get_user(host_tx->shift, &target_tx->shift);
7477     __get_user(host_tx->stabil, &target_tx->stabil);
7478     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7479     __get_user(host_tx->calcnt, &target_tx->calcnt);
7480     __get_user(host_tx->errcnt, &target_tx->errcnt);
7481     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7482     __get_user(host_tx->tai, &target_tx->tai);
7483 
7484     unlock_user_struct(target_tx, target_addr, 0);
7485     return 0;
7486 }
7487 
7488 static inline abi_long host_to_target_timex64(abi_long target_addr,
7489                                               struct timex *host_tx)
7490 {
7491     struct target__kernel_timex *target_tx;
7492 
7493    if (copy_to_user_timeval64(target_addr +
7494                               offsetof(struct target__kernel_timex, time),
7495                               &host_tx->time)) {
7496         return -TARGET_EFAULT;
7497     }
7498 
7499     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7500         return -TARGET_EFAULT;
7501     }
7502 
7503     __put_user(host_tx->modes, &target_tx->modes);
7504     __put_user(host_tx->offset, &target_tx->offset);
7505     __put_user(host_tx->freq, &target_tx->freq);
7506     __put_user(host_tx->maxerror, &target_tx->maxerror);
7507     __put_user(host_tx->esterror, &target_tx->esterror);
7508     __put_user(host_tx->status, &target_tx->status);
7509     __put_user(host_tx->constant, &target_tx->constant);
7510     __put_user(host_tx->precision, &target_tx->precision);
7511     __put_user(host_tx->tolerance, &target_tx->tolerance);
7512     __put_user(host_tx->tick, &target_tx->tick);
7513     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7514     __put_user(host_tx->jitter, &target_tx->jitter);
7515     __put_user(host_tx->shift, &target_tx->shift);
7516     __put_user(host_tx->stabil, &target_tx->stabil);
7517     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7518     __put_user(host_tx->calcnt, &target_tx->calcnt);
7519     __put_user(host_tx->errcnt, &target_tx->errcnt);
7520     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7521     __put_user(host_tx->tai, &target_tx->tai);
7522 
7523     unlock_user_struct(target_tx, target_addr, 1);
7524     return 0;
7525 }
7526 #endif
7527 
7528 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7529 #define sigev_notify_thread_id _sigev_un._tid
7530 #endif
7531 
7532 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7533                                                abi_ulong target_addr)
7534 {
7535     struct target_sigevent *target_sevp;
7536 
7537     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7538         return -TARGET_EFAULT;
7539     }
7540 
7541     /* This union is awkward on 64 bit systems because it has a 32 bit
7542      * integer and a pointer in it; we follow the conversion approach
7543      * used for handling sigval types in signal.c so the guest should get
7544      * the correct value back even if we did a 64 bit byteswap and it's
7545      * using the 32 bit integer.
7546      */
7547     host_sevp->sigev_value.sival_ptr =
7548         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7549     host_sevp->sigev_signo =
7550         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7551     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7552     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7553 
7554     unlock_user_struct(target_sevp, target_addr, 1);
7555     return 0;
7556 }
7557 
7558 #if defined(TARGET_NR_mlockall)
7559 static inline int target_to_host_mlockall_arg(int arg)
7560 {
7561     int result = 0;
7562 
7563     if (arg & TARGET_MCL_CURRENT) {
7564         result |= MCL_CURRENT;
7565     }
7566     if (arg & TARGET_MCL_FUTURE) {
7567         result |= MCL_FUTURE;
7568     }
7569 #ifdef MCL_ONFAULT
7570     if (arg & TARGET_MCL_ONFAULT) {
7571         result |= MCL_ONFAULT;
7572     }
7573 #endif
7574 
7575     return result;
7576 }
7577 #endif
7578 
7579 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7580      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7581      defined(TARGET_NR_newfstatat))
7582 static inline abi_long host_to_target_stat64(void *cpu_env,
7583                                              abi_ulong target_addr,
7584                                              struct stat *host_st)
7585 {
7586 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7587     if (((CPUARMState *)cpu_env)->eabi) {
7588         struct target_eabi_stat64 *target_st;
7589 
7590         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7591             return -TARGET_EFAULT;
7592         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7593         __put_user(host_st->st_dev, &target_st->st_dev);
7594         __put_user(host_st->st_ino, &target_st->st_ino);
7595 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7596         __put_user(host_st->st_ino, &target_st->__st_ino);
7597 #endif
7598         __put_user(host_st->st_mode, &target_st->st_mode);
7599         __put_user(host_st->st_nlink, &target_st->st_nlink);
7600         __put_user(host_st->st_uid, &target_st->st_uid);
7601         __put_user(host_st->st_gid, &target_st->st_gid);
7602         __put_user(host_st->st_rdev, &target_st->st_rdev);
7603         __put_user(host_st->st_size, &target_st->st_size);
7604         __put_user(host_st->st_blksize, &target_st->st_blksize);
7605         __put_user(host_st->st_blocks, &target_st->st_blocks);
7606         __put_user(host_st->st_atime, &target_st->target_st_atime);
7607         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7608         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7609 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7610         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7611         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7612         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7613 #endif
7614         unlock_user_struct(target_st, target_addr, 1);
7615     } else
7616 #endif
7617     {
7618 #if defined(TARGET_HAS_STRUCT_STAT64)
7619         struct target_stat64 *target_st;
7620 #else
7621         struct target_stat *target_st;
7622 #endif
7623 
7624         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7625             return -TARGET_EFAULT;
7626         memset(target_st, 0, sizeof(*target_st));
7627         __put_user(host_st->st_dev, &target_st->st_dev);
7628         __put_user(host_st->st_ino, &target_st->st_ino);
7629 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7630         __put_user(host_st->st_ino, &target_st->__st_ino);
7631 #endif
7632         __put_user(host_st->st_mode, &target_st->st_mode);
7633         __put_user(host_st->st_nlink, &target_st->st_nlink);
7634         __put_user(host_st->st_uid, &target_st->st_uid);
7635         __put_user(host_st->st_gid, &target_st->st_gid);
7636         __put_user(host_st->st_rdev, &target_st->st_rdev);
7637         /* XXX: better use of kernel struct */
7638         __put_user(host_st->st_size, &target_st->st_size);
7639         __put_user(host_st->st_blksize, &target_st->st_blksize);
7640         __put_user(host_st->st_blocks, &target_st->st_blocks);
7641         __put_user(host_st->st_atime, &target_st->target_st_atime);
7642         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7643         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7644 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7645         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7646         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7647         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7648 #endif
7649         unlock_user_struct(target_st, target_addr, 1);
7650     }
7651 
7652     return 0;
7653 }
7654 #endif
7655 
7656 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7657 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7658                                             abi_ulong target_addr)
7659 {
7660     struct target_statx *target_stx;
7661 
7662     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7663         return -TARGET_EFAULT;
7664     }
7665     memset(target_stx, 0, sizeof(*target_stx));
7666 
7667     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7668     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7669     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7670     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7671     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7672     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7673     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7674     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7675     __put_user(host_stx->stx_size, &target_stx->stx_size);
7676     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7677     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7678     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7679     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7680     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7681     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7682     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7683     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7684     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7685     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7686     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7687     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7688     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7689     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7690 
7691     unlock_user_struct(target_stx, target_addr, 1);
7692 
7693     return 0;
7694 }
7695 #endif
7696 
7697 static int do_sys_futex(int *uaddr, int op, int val,
7698                          const struct timespec *timeout, int *uaddr2,
7699                          int val3)
7700 {
7701 #if HOST_LONG_BITS == 64
7702 #if defined(__NR_futex)
7703     /* always a 64-bit time_t, it doesn't define _time64 version  */
7704     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7705 
7706 #endif
7707 #else /* HOST_LONG_BITS == 64 */
7708 #if defined(__NR_futex_time64)
7709     if (sizeof(timeout->tv_sec) == 8) {
7710         /* _time64 function on 32bit arch */
7711         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7712     }
7713 #endif
7714 #if defined(__NR_futex)
7715     /* old function on 32bit arch */
7716     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7717 #endif
7718 #endif /* HOST_LONG_BITS == 64 */
7719     g_assert_not_reached();
7720 }
7721 
7722 static int do_safe_futex(int *uaddr, int op, int val,
7723                          const struct timespec *timeout, int *uaddr2,
7724                          int val3)
7725 {
7726 #if HOST_LONG_BITS == 64
7727 #if defined(__NR_futex)
7728     /* always a 64-bit time_t, it doesn't define _time64 version  */
7729     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7730 #endif
7731 #else /* HOST_LONG_BITS == 64 */
7732 #if defined(__NR_futex_time64)
7733     if (sizeof(timeout->tv_sec) == 8) {
7734         /* _time64 function on 32bit arch */
7735         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7736                                            val3));
7737     }
7738 #endif
7739 #if defined(__NR_futex)
7740     /* old function on 32bit arch */
7741     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7742 #endif
7743 #endif /* HOST_LONG_BITS == 64 */
7744     return -TARGET_ENOSYS;
7745 }
7746 
7747 /* ??? Using host futex calls even when target atomic operations
7748    are not really atomic probably breaks things.  However implementing
7749    futexes locally would make futexes shared between multiple processes
7750    tricky.  However they're probably useless because guest atomic
7751    operations won't work either.  */
7752 #if defined(TARGET_NR_futex)
7753 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7754                     target_ulong timeout, target_ulong uaddr2, int val3)
7755 {
7756     struct timespec ts, *pts;
7757     int base_op;
7758 
7759     /* ??? We assume FUTEX_* constants are the same on both host
7760        and target.  */
7761 #ifdef FUTEX_CMD_MASK
7762     base_op = op & FUTEX_CMD_MASK;
7763 #else
7764     base_op = op;
7765 #endif
7766     switch (base_op) {
7767     case FUTEX_WAIT:
7768     case FUTEX_WAIT_BITSET:
7769         if (timeout) {
7770             pts = &ts;
7771             target_to_host_timespec(pts, timeout);
7772         } else {
7773             pts = NULL;
7774         }
7775         return do_safe_futex(g2h(cpu, uaddr),
7776                              op, tswap32(val), pts, NULL, val3);
7777     case FUTEX_WAKE:
7778         return do_safe_futex(g2h(cpu, uaddr),
7779                              op, val, NULL, NULL, 0);
7780     case FUTEX_FD:
7781         return do_safe_futex(g2h(cpu, uaddr),
7782                              op, val, NULL, NULL, 0);
7783     case FUTEX_REQUEUE:
7784     case FUTEX_CMP_REQUEUE:
7785     case FUTEX_WAKE_OP:
7786         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7787            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7788            But the prototype takes a `struct timespec *'; insert casts
7789            to satisfy the compiler.  We do not need to tswap TIMEOUT
7790            since it's not compared to guest memory.  */
7791         pts = (struct timespec *)(uintptr_t) timeout;
7792         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7793                              (base_op == FUTEX_CMP_REQUEUE
7794                               ? tswap32(val3) : val3));
7795     default:
7796         return -TARGET_ENOSYS;
7797     }
7798 }
7799 #endif
7800 
7801 #if defined(TARGET_NR_futex_time64)
7802 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7803                            int val, target_ulong timeout,
7804                            target_ulong uaddr2, int val3)
7805 {
7806     struct timespec ts, *pts;
7807     int base_op;
7808 
7809     /* ??? We assume FUTEX_* constants are the same on both host
7810        and target.  */
7811 #ifdef FUTEX_CMD_MASK
7812     base_op = op & FUTEX_CMD_MASK;
7813 #else
7814     base_op = op;
7815 #endif
7816     switch (base_op) {
7817     case FUTEX_WAIT:
7818     case FUTEX_WAIT_BITSET:
7819         if (timeout) {
7820             pts = &ts;
7821             if (target_to_host_timespec64(pts, timeout)) {
7822                 return -TARGET_EFAULT;
7823             }
7824         } else {
7825             pts = NULL;
7826         }
7827         return do_safe_futex(g2h(cpu, uaddr), op,
7828                              tswap32(val), pts, NULL, val3);
7829     case FUTEX_WAKE:
7830         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7831     case FUTEX_FD:
7832         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7833     case FUTEX_REQUEUE:
7834     case FUTEX_CMP_REQUEUE:
7835     case FUTEX_WAKE_OP:
7836         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7837            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7838            But the prototype takes a `struct timespec *'; insert casts
7839            to satisfy the compiler.  We do not need to tswap TIMEOUT
7840            since it's not compared to guest memory.  */
7841         pts = (struct timespec *)(uintptr_t) timeout;
7842         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7843                              (base_op == FUTEX_CMP_REQUEUE
7844                               ? tswap32(val3) : val3));
7845     default:
7846         return -TARGET_ENOSYS;
7847     }
7848 }
7849 #endif
7850 
7851 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7852 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7853                                      abi_long handle, abi_long mount_id,
7854                                      abi_long flags)
7855 {
7856     struct file_handle *target_fh;
7857     struct file_handle *fh;
7858     int mid = 0;
7859     abi_long ret;
7860     char *name;
7861     unsigned int size, total_size;
7862 
7863     if (get_user_s32(size, handle)) {
7864         return -TARGET_EFAULT;
7865     }
7866 
7867     name = lock_user_string(pathname);
7868     if (!name) {
7869         return -TARGET_EFAULT;
7870     }
7871 
7872     total_size = sizeof(struct file_handle) + size;
7873     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7874     if (!target_fh) {
7875         unlock_user(name, pathname, 0);
7876         return -TARGET_EFAULT;
7877     }
7878 
7879     fh = g_malloc0(total_size);
7880     fh->handle_bytes = size;
7881 
7882     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7883     unlock_user(name, pathname, 0);
7884 
7885     /* man name_to_handle_at(2):
7886      * Other than the use of the handle_bytes field, the caller should treat
7887      * the file_handle structure as an opaque data type
7888      */
7889 
7890     memcpy(target_fh, fh, total_size);
7891     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7892     target_fh->handle_type = tswap32(fh->handle_type);
7893     g_free(fh);
7894     unlock_user(target_fh, handle, total_size);
7895 
7896     if (put_user_s32(mid, mount_id)) {
7897         return -TARGET_EFAULT;
7898     }
7899 
7900     return ret;
7901 
7902 }
7903 #endif
7904 
7905 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7906 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7907                                      abi_long flags)
7908 {
7909     struct file_handle *target_fh;
7910     struct file_handle *fh;
7911     unsigned int size, total_size;
7912     abi_long ret;
7913 
7914     if (get_user_s32(size, handle)) {
7915         return -TARGET_EFAULT;
7916     }
7917 
7918     total_size = sizeof(struct file_handle) + size;
7919     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7920     if (!target_fh) {
7921         return -TARGET_EFAULT;
7922     }
7923 
7924     fh = g_memdup(target_fh, total_size);
7925     fh->handle_bytes = size;
7926     fh->handle_type = tswap32(target_fh->handle_type);
7927 
7928     ret = get_errno(open_by_handle_at(mount_fd, fh,
7929                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7930 
7931     g_free(fh);
7932 
7933     unlock_user(target_fh, handle, total_size);
7934 
7935     return ret;
7936 }
7937 #endif
7938 
7939 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7940 
7941 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7942 {
7943     int host_flags;
7944     target_sigset_t *target_mask;
7945     sigset_t host_mask;
7946     abi_long ret;
7947 
7948     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7949         return -TARGET_EINVAL;
7950     }
7951     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7952         return -TARGET_EFAULT;
7953     }
7954 
7955     target_to_host_sigset(&host_mask, target_mask);
7956 
7957     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7958 
7959     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7960     if (ret >= 0) {
7961         fd_trans_register(ret, &target_signalfd_trans);
7962     }
7963 
7964     unlock_user_struct(target_mask, mask, 0);
7965 
7966     return ret;
7967 }
7968 #endif
7969 
7970 /* Map host to target signal numbers for the wait family of syscalls.
7971    Assume all other status bits are the same.  */
7972 int host_to_target_waitstatus(int status)
7973 {
7974     if (WIFSIGNALED(status)) {
7975         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7976     }
7977     if (WIFSTOPPED(status)) {
7978         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7979                | (status & 0xff);
7980     }
7981     return status;
7982 }
7983 
7984 static int open_self_cmdline(void *cpu_env, int fd)
7985 {
7986     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7987     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7988     int i;
7989 
7990     for (i = 0; i < bprm->argc; i++) {
7991         size_t len = strlen(bprm->argv[i]) + 1;
7992 
7993         if (write(fd, bprm->argv[i], len) != len) {
7994             return -1;
7995         }
7996     }
7997 
7998     return 0;
7999 }
8000 
8001 static int open_self_maps(void *cpu_env, int fd)
8002 {
8003     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8004     TaskState *ts = cpu->opaque;
8005     GSList *map_info = read_self_maps();
8006     GSList *s;
8007     int count;
8008 
8009     for (s = map_info; s; s = g_slist_next(s)) {
8010         MapInfo *e = (MapInfo *) s->data;
8011 
8012         if (h2g_valid(e->start)) {
8013             unsigned long min = e->start;
8014             unsigned long max = e->end;
8015             int flags = page_get_flags(h2g(min));
8016             const char *path;
8017 
8018             max = h2g_valid(max - 1) ?
8019                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8020 
8021             if (page_check_range(h2g(min), max - min, flags) == -1) {
8022                 continue;
8023             }
8024 
8025             if (h2g(min) == ts->info->stack_limit) {
8026                 path = "[stack]";
8027             } else {
8028                 path = e->path;
8029             }
8030 
8031             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8032                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8033                             h2g(min), h2g(max - 1) + 1,
8034                             (flags & PAGE_READ) ? 'r' : '-',
8035                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8036                             (flags & PAGE_EXEC) ? 'x' : '-',
8037                             e->is_priv ? 'p' : '-',
8038                             (uint64_t) e->offset, e->dev, e->inode);
8039             if (path) {
8040                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8041             } else {
8042                 dprintf(fd, "\n");
8043             }
8044         }
8045     }
8046 
8047     free_self_maps(map_info);
8048 
8049 #ifdef TARGET_VSYSCALL_PAGE
8050     /*
8051      * We only support execution from the vsyscall page.
8052      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8053      */
8054     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8055                     " --xp 00000000 00:00 0",
8056                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8057     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8058 #endif
8059 
8060     return 0;
8061 }
8062 
8063 static int open_self_stat(void *cpu_env, int fd)
8064 {
8065     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8066     TaskState *ts = cpu->opaque;
8067     g_autoptr(GString) buf = g_string_new(NULL);
8068     int i;
8069 
8070     for (i = 0; i < 44; i++) {
8071         if (i == 0) {
8072             /* pid */
8073             g_string_printf(buf, FMT_pid " ", getpid());
8074         } else if (i == 1) {
8075             /* app name */
8076             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8077             bin = bin ? bin + 1 : ts->bprm->argv[0];
8078             g_string_printf(buf, "(%.15s) ", bin);
8079         } else if (i == 3) {
8080             /* ppid */
8081             g_string_printf(buf, FMT_pid " ", getppid());
8082         } else if (i == 27) {
8083             /* stack bottom */
8084             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8085         } else {
8086             /* for the rest, there is MasterCard */
8087             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8088         }
8089 
8090         if (write(fd, buf->str, buf->len) != buf->len) {
8091             return -1;
8092         }
8093     }
8094 
8095     return 0;
8096 }
8097 
8098 static int open_self_auxv(void *cpu_env, int fd)
8099 {
8100     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8101     TaskState *ts = cpu->opaque;
8102     abi_ulong auxv = ts->info->saved_auxv;
8103     abi_ulong len = ts->info->auxv_len;
8104     char *ptr;
8105 
8106     /*
8107      * Auxiliary vector is stored in target process stack.
8108      * read in whole auxv vector and copy it to file
8109      */
8110     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8111     if (ptr != NULL) {
8112         while (len > 0) {
8113             ssize_t r;
8114             r = write(fd, ptr, len);
8115             if (r <= 0) {
8116                 break;
8117             }
8118             len -= r;
8119             ptr += r;
8120         }
8121         lseek(fd, 0, SEEK_SET);
8122         unlock_user(ptr, auxv, len);
8123     }
8124 
8125     return 0;
8126 }
8127 
8128 static int is_proc_myself(const char *filename, const char *entry)
8129 {
8130     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8131         filename += strlen("/proc/");
8132         if (!strncmp(filename, "self/", strlen("self/"))) {
8133             filename += strlen("self/");
8134         } else if (*filename >= '1' && *filename <= '9') {
8135             char myself[80];
8136             snprintf(myself, sizeof(myself), "%d/", getpid());
8137             if (!strncmp(filename, myself, strlen(myself))) {
8138                 filename += strlen(myself);
8139             } else {
8140                 return 0;
8141             }
8142         } else {
8143             return 0;
8144         }
8145         if (!strcmp(filename, entry)) {
8146             return 1;
8147         }
8148     }
8149     return 0;
8150 }
8151 
8152 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8153     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8154 static int is_proc(const char *filename, const char *entry)
8155 {
8156     return strcmp(filename, entry) == 0;
8157 }
8158 #endif
8159 
8160 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8161 static int open_net_route(void *cpu_env, int fd)
8162 {
8163     FILE *fp;
8164     char *line = NULL;
8165     size_t len = 0;
8166     ssize_t read;
8167 
8168     fp = fopen("/proc/net/route", "r");
8169     if (fp == NULL) {
8170         return -1;
8171     }
8172 
8173     /* read header */
8174 
8175     read = getline(&line, &len, fp);
8176     dprintf(fd, "%s", line);
8177 
8178     /* read routes */
8179 
8180     while ((read = getline(&line, &len, fp)) != -1) {
8181         char iface[16];
8182         uint32_t dest, gw, mask;
8183         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8184         int fields;
8185 
8186         fields = sscanf(line,
8187                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8188                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8189                         &mask, &mtu, &window, &irtt);
8190         if (fields != 11) {
8191             continue;
8192         }
8193         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8194                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8195                 metric, tswap32(mask), mtu, window, irtt);
8196     }
8197 
8198     free(line);
8199     fclose(fp);
8200 
8201     return 0;
8202 }
8203 #endif
8204 
8205 #if defined(TARGET_SPARC)
8206 static int open_cpuinfo(void *cpu_env, int fd)
8207 {
8208     dprintf(fd, "type\t\t: sun4u\n");
8209     return 0;
8210 }
8211 #endif
8212 
8213 #if defined(TARGET_HPPA)
8214 static int open_cpuinfo(void *cpu_env, int fd)
8215 {
8216     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8217     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8218     dprintf(fd, "capabilities\t: os32\n");
8219     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8220     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8221     return 0;
8222 }
8223 #endif
8224 
8225 #if defined(TARGET_M68K)
8226 static int open_hardware(void *cpu_env, int fd)
8227 {
8228     dprintf(fd, "Model:\t\tqemu-m68k\n");
8229     return 0;
8230 }
8231 #endif
8232 
8233 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8234 {
8235     struct fake_open {
8236         const char *filename;
8237         int (*fill)(void *cpu_env, int fd);
8238         int (*cmp)(const char *s1, const char *s2);
8239     };
8240     const struct fake_open *fake_open;
8241     static const struct fake_open fakes[] = {
8242         { "maps", open_self_maps, is_proc_myself },
8243         { "stat", open_self_stat, is_proc_myself },
8244         { "auxv", open_self_auxv, is_proc_myself },
8245         { "cmdline", open_self_cmdline, is_proc_myself },
8246 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8247         { "/proc/net/route", open_net_route, is_proc },
8248 #endif
8249 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8250         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8251 #endif
8252 #if defined(TARGET_M68K)
8253         { "/proc/hardware", open_hardware, is_proc },
8254 #endif
8255         { NULL, NULL, NULL }
8256     };
8257 
8258     if (is_proc_myself(pathname, "exe")) {
8259         int execfd = qemu_getauxval(AT_EXECFD);
8260         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8261     }
8262 
8263     for (fake_open = fakes; fake_open->filename; fake_open++) {
8264         if (fake_open->cmp(pathname, fake_open->filename)) {
8265             break;
8266         }
8267     }
8268 
8269     if (fake_open->filename) {
8270         const char *tmpdir;
8271         char filename[PATH_MAX];
8272         int fd, r;
8273 
8274         /* create temporary file to map stat to */
8275         tmpdir = getenv("TMPDIR");
8276         if (!tmpdir)
8277             tmpdir = "/tmp";
8278         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8279         fd = mkstemp(filename);
8280         if (fd < 0) {
8281             return fd;
8282         }
8283         unlink(filename);
8284 
8285         if ((r = fake_open->fill(cpu_env, fd))) {
8286             int e = errno;
8287             close(fd);
8288             errno = e;
8289             return r;
8290         }
8291         lseek(fd, 0, SEEK_SET);
8292 
8293         return fd;
8294     }
8295 
8296     return safe_openat(dirfd, path(pathname), flags, mode);
8297 }
8298 
8299 #define TIMER_MAGIC 0x0caf0000
8300 #define TIMER_MAGIC_MASK 0xffff0000
8301 
8302 /* Convert QEMU provided timer ID back to internal 16bit index format */
8303 static target_timer_t get_timer_id(abi_long arg)
8304 {
8305     target_timer_t timerid = arg;
8306 
8307     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8308         return -TARGET_EINVAL;
8309     }
8310 
8311     timerid &= 0xffff;
8312 
8313     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8314         return -TARGET_EINVAL;
8315     }
8316 
8317     return timerid;
8318 }
8319 
8320 static int target_to_host_cpu_mask(unsigned long *host_mask,
8321                                    size_t host_size,
8322                                    abi_ulong target_addr,
8323                                    size_t target_size)
8324 {
8325     unsigned target_bits = sizeof(abi_ulong) * 8;
8326     unsigned host_bits = sizeof(*host_mask) * 8;
8327     abi_ulong *target_mask;
8328     unsigned i, j;
8329 
8330     assert(host_size >= target_size);
8331 
8332     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8333     if (!target_mask) {
8334         return -TARGET_EFAULT;
8335     }
8336     memset(host_mask, 0, host_size);
8337 
8338     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8339         unsigned bit = i * target_bits;
8340         abi_ulong val;
8341 
8342         __get_user(val, &target_mask[i]);
8343         for (j = 0; j < target_bits; j++, bit++) {
8344             if (val & (1UL << j)) {
8345                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8346             }
8347         }
8348     }
8349 
8350     unlock_user(target_mask, target_addr, 0);
8351     return 0;
8352 }
8353 
8354 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8355                                    size_t host_size,
8356                                    abi_ulong target_addr,
8357                                    size_t target_size)
8358 {
8359     unsigned target_bits = sizeof(abi_ulong) * 8;
8360     unsigned host_bits = sizeof(*host_mask) * 8;
8361     abi_ulong *target_mask;
8362     unsigned i, j;
8363 
8364     assert(host_size >= target_size);
8365 
8366     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8367     if (!target_mask) {
8368         return -TARGET_EFAULT;
8369     }
8370 
8371     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8372         unsigned bit = i * target_bits;
8373         abi_ulong val = 0;
8374 
8375         for (j = 0; j < target_bits; j++, bit++) {
8376             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8377                 val |= 1UL << j;
8378             }
8379         }
8380         __put_user(val, &target_mask[i]);
8381     }
8382 
8383     unlock_user(target_mask, target_addr, target_size);
8384     return 0;
8385 }
8386 
8387 #ifdef TARGET_NR_getdents
8388 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8389 {
8390     g_autofree void *hdirp = NULL;
8391     void *tdirp;
8392     int hlen, hoff, toff;
8393     int hreclen, treclen;
8394     off64_t prev_diroff = 0;
8395 
8396     hdirp = g_try_malloc(count);
8397     if (!hdirp) {
8398         return -TARGET_ENOMEM;
8399     }
8400 
8401 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8402     hlen = sys_getdents(dirfd, hdirp, count);
8403 #else
8404     hlen = sys_getdents64(dirfd, hdirp, count);
8405 #endif
8406 
8407     hlen = get_errno(hlen);
8408     if (is_error(hlen)) {
8409         return hlen;
8410     }
8411 
8412     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8413     if (!tdirp) {
8414         return -TARGET_EFAULT;
8415     }
8416 
8417     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8418 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8419         struct linux_dirent *hde = hdirp + hoff;
8420 #else
8421         struct linux_dirent64 *hde = hdirp + hoff;
8422 #endif
8423         struct target_dirent *tde = tdirp + toff;
8424         int namelen;
8425         uint8_t type;
8426 
8427         namelen = strlen(hde->d_name);
8428         hreclen = hde->d_reclen;
8429         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8430         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8431 
8432         if (toff + treclen > count) {
8433             /*
8434              * If the host struct is smaller than the target struct, or
8435              * requires less alignment and thus packs into less space,
8436              * then the host can return more entries than we can pass
8437              * on to the guest.
8438              */
8439             if (toff == 0) {
8440                 toff = -TARGET_EINVAL; /* result buffer is too small */
8441                 break;
8442             }
8443             /*
8444              * Return what we have, resetting the file pointer to the
8445              * location of the first record not returned.
8446              */
8447             lseek64(dirfd, prev_diroff, SEEK_SET);
8448             break;
8449         }
8450 
8451         prev_diroff = hde->d_off;
8452         tde->d_ino = tswapal(hde->d_ino);
8453         tde->d_off = tswapal(hde->d_off);
8454         tde->d_reclen = tswap16(treclen);
8455         memcpy(tde->d_name, hde->d_name, namelen + 1);
8456 
8457         /*
8458          * The getdents type is in what was formerly a padding byte at the
8459          * end of the structure.
8460          */
8461 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8462         type = *((uint8_t *)hde + hreclen - 1);
8463 #else
8464         type = hde->d_type;
8465 #endif
8466         *((uint8_t *)tde + treclen - 1) = type;
8467     }
8468 
8469     unlock_user(tdirp, arg2, toff);
8470     return toff;
8471 }
8472 #endif /* TARGET_NR_getdents */
8473 
8474 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8475 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8476 {
8477     g_autofree void *hdirp = NULL;
8478     void *tdirp;
8479     int hlen, hoff, toff;
8480     int hreclen, treclen;
8481     off64_t prev_diroff = 0;
8482 
8483     hdirp = g_try_malloc(count);
8484     if (!hdirp) {
8485         return -TARGET_ENOMEM;
8486     }
8487 
8488     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8489     if (is_error(hlen)) {
8490         return hlen;
8491     }
8492 
8493     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8494     if (!tdirp) {
8495         return -TARGET_EFAULT;
8496     }
8497 
8498     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8499         struct linux_dirent64 *hde = hdirp + hoff;
8500         struct target_dirent64 *tde = tdirp + toff;
8501         int namelen;
8502 
8503         namelen = strlen(hde->d_name) + 1;
8504         hreclen = hde->d_reclen;
8505         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8506         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8507 
8508         if (toff + treclen > count) {
8509             /*
8510              * If the host struct is smaller than the target struct, or
8511              * requires less alignment and thus packs into less space,
8512              * then the host can return more entries than we can pass
8513              * on to the guest.
8514              */
8515             if (toff == 0) {
8516                 toff = -TARGET_EINVAL; /* result buffer is too small */
8517                 break;
8518             }
8519             /*
8520              * Return what we have, resetting the file pointer to the
8521              * location of the first record not returned.
8522              */
8523             lseek64(dirfd, prev_diroff, SEEK_SET);
8524             break;
8525         }
8526 
8527         prev_diroff = hde->d_off;
8528         tde->d_ino = tswap64(hde->d_ino);
8529         tde->d_off = tswap64(hde->d_off);
8530         tde->d_reclen = tswap16(treclen);
8531         tde->d_type = hde->d_type;
8532         memcpy(tde->d_name, hde->d_name, namelen);
8533     }
8534 
8535     unlock_user(tdirp, arg2, toff);
8536     return toff;
8537 }
8538 #endif /* TARGET_NR_getdents64 */
8539 
8540 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8541 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8542 #endif
8543 
8544 /* This is an internal helper for do_syscall so that it is easier
8545  * to have a single return point, so that actions, such as logging
8546  * of syscall results, can be performed.
8547  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8548  */
8549 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8550                             abi_long arg2, abi_long arg3, abi_long arg4,
8551                             abi_long arg5, abi_long arg6, abi_long arg7,
8552                             abi_long arg8)
8553 {
8554     CPUState *cpu = env_cpu(cpu_env);
8555     abi_long ret;
8556 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8557     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8558     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8559     || defined(TARGET_NR_statx)
8560     struct stat st;
8561 #endif
8562 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8563     || defined(TARGET_NR_fstatfs)
8564     struct statfs stfs;
8565 #endif
8566     void *p;
8567 
8568     switch(num) {
8569     case TARGET_NR_exit:
8570         /* In old applications this may be used to implement _exit(2).
8571            However in threaded applications it is used for thread termination,
8572            and _exit_group is used for application termination.
8573            Do thread termination if we have more then one thread.  */
8574 
8575         if (block_signals()) {
8576             return -QEMU_ERESTARTSYS;
8577         }
8578 
8579         pthread_mutex_lock(&clone_lock);
8580 
8581         if (CPU_NEXT(first_cpu)) {
8582             TaskState *ts = cpu->opaque;
8583 
8584             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8585             object_unref(OBJECT(cpu));
8586             /*
8587              * At this point the CPU should be unrealized and removed
8588              * from cpu lists. We can clean-up the rest of the thread
8589              * data without the lock held.
8590              */
8591 
8592             pthread_mutex_unlock(&clone_lock);
8593 
8594             if (ts->child_tidptr) {
8595                 put_user_u32(0, ts->child_tidptr);
8596                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8597                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8598             }
8599             thread_cpu = NULL;
8600             g_free(ts);
8601             rcu_unregister_thread();
8602             pthread_exit(NULL);
8603         }
8604 
8605         pthread_mutex_unlock(&clone_lock);
8606         preexit_cleanup(cpu_env, arg1);
8607         _exit(arg1);
8608         return 0; /* avoid warning */
8609     case TARGET_NR_read:
8610         if (arg2 == 0 && arg3 == 0) {
8611             return get_errno(safe_read(arg1, 0, 0));
8612         } else {
8613             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8614                 return -TARGET_EFAULT;
8615             ret = get_errno(safe_read(arg1, p, arg3));
8616             if (ret >= 0 &&
8617                 fd_trans_host_to_target_data(arg1)) {
8618                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8619             }
8620             unlock_user(p, arg2, ret);
8621         }
8622         return ret;
8623     case TARGET_NR_write:
8624         if (arg2 == 0 && arg3 == 0) {
8625             return get_errno(safe_write(arg1, 0, 0));
8626         }
8627         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8628             return -TARGET_EFAULT;
8629         if (fd_trans_target_to_host_data(arg1)) {
8630             void *copy = g_malloc(arg3);
8631             memcpy(copy, p, arg3);
8632             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8633             if (ret >= 0) {
8634                 ret = get_errno(safe_write(arg1, copy, ret));
8635             }
8636             g_free(copy);
8637         } else {
8638             ret = get_errno(safe_write(arg1, p, arg3));
8639         }
8640         unlock_user(p, arg2, 0);
8641         return ret;
8642 
8643 #ifdef TARGET_NR_open
8644     case TARGET_NR_open:
8645         if (!(p = lock_user_string(arg1)))
8646             return -TARGET_EFAULT;
8647         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8648                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8649                                   arg3));
8650         fd_trans_unregister(ret);
8651         unlock_user(p, arg1, 0);
8652         return ret;
8653 #endif
8654     case TARGET_NR_openat:
8655         if (!(p = lock_user_string(arg2)))
8656             return -TARGET_EFAULT;
8657         ret = get_errno(do_openat(cpu_env, arg1, p,
8658                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8659                                   arg4));
8660         fd_trans_unregister(ret);
8661         unlock_user(p, arg2, 0);
8662         return ret;
8663 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8664     case TARGET_NR_name_to_handle_at:
8665         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8666         return ret;
8667 #endif
8668 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8669     case TARGET_NR_open_by_handle_at:
8670         ret = do_open_by_handle_at(arg1, arg2, arg3);
8671         fd_trans_unregister(ret);
8672         return ret;
8673 #endif
8674     case TARGET_NR_close:
8675         fd_trans_unregister(arg1);
8676         return get_errno(close(arg1));
8677 
8678     case TARGET_NR_brk:
8679         return do_brk(arg1);
8680 #ifdef TARGET_NR_fork
8681     case TARGET_NR_fork:
8682         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8683 #endif
8684 #ifdef TARGET_NR_waitpid
8685     case TARGET_NR_waitpid:
8686         {
8687             int status;
8688             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8689             if (!is_error(ret) && arg2 && ret
8690                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8691                 return -TARGET_EFAULT;
8692         }
8693         return ret;
8694 #endif
8695 #ifdef TARGET_NR_waitid
8696     case TARGET_NR_waitid:
8697         {
8698             siginfo_t info;
8699             info.si_pid = 0;
8700             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8701             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8702                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8703                     return -TARGET_EFAULT;
8704                 host_to_target_siginfo(p, &info);
8705                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8706             }
8707         }
8708         return ret;
8709 #endif
8710 #ifdef TARGET_NR_creat /* not on alpha */
8711     case TARGET_NR_creat:
8712         if (!(p = lock_user_string(arg1)))
8713             return -TARGET_EFAULT;
8714         ret = get_errno(creat(p, arg2));
8715         fd_trans_unregister(ret);
8716         unlock_user(p, arg1, 0);
8717         return ret;
8718 #endif
8719 #ifdef TARGET_NR_link
8720     case TARGET_NR_link:
8721         {
8722             void * p2;
8723             p = lock_user_string(arg1);
8724             p2 = lock_user_string(arg2);
8725             if (!p || !p2)
8726                 ret = -TARGET_EFAULT;
8727             else
8728                 ret = get_errno(link(p, p2));
8729             unlock_user(p2, arg2, 0);
8730             unlock_user(p, arg1, 0);
8731         }
8732         return ret;
8733 #endif
8734 #if defined(TARGET_NR_linkat)
8735     case TARGET_NR_linkat:
8736         {
8737             void * p2 = NULL;
8738             if (!arg2 || !arg4)
8739                 return -TARGET_EFAULT;
8740             p  = lock_user_string(arg2);
8741             p2 = lock_user_string(arg4);
8742             if (!p || !p2)
8743                 ret = -TARGET_EFAULT;
8744             else
8745                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8746             unlock_user(p, arg2, 0);
8747             unlock_user(p2, arg4, 0);
8748         }
8749         return ret;
8750 #endif
8751 #ifdef TARGET_NR_unlink
8752     case TARGET_NR_unlink:
8753         if (!(p = lock_user_string(arg1)))
8754             return -TARGET_EFAULT;
8755         ret = get_errno(unlink(p));
8756         unlock_user(p, arg1, 0);
8757         return ret;
8758 #endif
8759 #if defined(TARGET_NR_unlinkat)
8760     case TARGET_NR_unlinkat:
8761         if (!(p = lock_user_string(arg2)))
8762             return -TARGET_EFAULT;
8763         ret = get_errno(unlinkat(arg1, p, arg3));
8764         unlock_user(p, arg2, 0);
8765         return ret;
8766 #endif
8767     case TARGET_NR_execve:
8768         {
8769             char **argp, **envp;
8770             int argc, envc;
8771             abi_ulong gp;
8772             abi_ulong guest_argp;
8773             abi_ulong guest_envp;
8774             abi_ulong addr;
8775             char **q;
8776 
8777             argc = 0;
8778             guest_argp = arg2;
8779             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8780                 if (get_user_ual(addr, gp))
8781                     return -TARGET_EFAULT;
8782                 if (!addr)
8783                     break;
8784                 argc++;
8785             }
8786             envc = 0;
8787             guest_envp = arg3;
8788             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8789                 if (get_user_ual(addr, gp))
8790                     return -TARGET_EFAULT;
8791                 if (!addr)
8792                     break;
8793                 envc++;
8794             }
8795 
8796             argp = g_new0(char *, argc + 1);
8797             envp = g_new0(char *, envc + 1);
8798 
8799             for (gp = guest_argp, q = argp; gp;
8800                   gp += sizeof(abi_ulong), q++) {
8801                 if (get_user_ual(addr, gp))
8802                     goto execve_efault;
8803                 if (!addr)
8804                     break;
8805                 if (!(*q = lock_user_string(addr)))
8806                     goto execve_efault;
8807             }
8808             *q = NULL;
8809 
8810             for (gp = guest_envp, q = envp; gp;
8811                   gp += sizeof(abi_ulong), q++) {
8812                 if (get_user_ual(addr, gp))
8813                     goto execve_efault;
8814                 if (!addr)
8815                     break;
8816                 if (!(*q = lock_user_string(addr)))
8817                     goto execve_efault;
8818             }
8819             *q = NULL;
8820 
8821             if (!(p = lock_user_string(arg1)))
8822                 goto execve_efault;
8823             /* Although execve() is not an interruptible syscall it is
8824              * a special case where we must use the safe_syscall wrapper:
8825              * if we allow a signal to happen before we make the host
8826              * syscall then we will 'lose' it, because at the point of
8827              * execve the process leaves QEMU's control. So we use the
8828              * safe syscall wrapper to ensure that we either take the
8829              * signal as a guest signal, or else it does not happen
8830              * before the execve completes and makes it the other
8831              * program's problem.
8832              */
8833             ret = get_errno(safe_execve(p, argp, envp));
8834             unlock_user(p, arg1, 0);
8835 
8836             goto execve_end;
8837 
8838         execve_efault:
8839             ret = -TARGET_EFAULT;
8840 
8841         execve_end:
8842             for (gp = guest_argp, q = argp; *q;
8843                   gp += sizeof(abi_ulong), q++) {
8844                 if (get_user_ual(addr, gp)
8845                     || !addr)
8846                     break;
8847                 unlock_user(*q, addr, 0);
8848             }
8849             for (gp = guest_envp, q = envp; *q;
8850                   gp += sizeof(abi_ulong), q++) {
8851                 if (get_user_ual(addr, gp)
8852                     || !addr)
8853                     break;
8854                 unlock_user(*q, addr, 0);
8855             }
8856 
8857             g_free(argp);
8858             g_free(envp);
8859         }
8860         return ret;
8861     case TARGET_NR_chdir:
8862         if (!(p = lock_user_string(arg1)))
8863             return -TARGET_EFAULT;
8864         ret = get_errno(chdir(p));
8865         unlock_user(p, arg1, 0);
8866         return ret;
8867 #ifdef TARGET_NR_time
8868     case TARGET_NR_time:
8869         {
8870             time_t host_time;
8871             ret = get_errno(time(&host_time));
8872             if (!is_error(ret)
8873                 && arg1
8874                 && put_user_sal(host_time, arg1))
8875                 return -TARGET_EFAULT;
8876         }
8877         return ret;
8878 #endif
8879 #ifdef TARGET_NR_mknod
8880     case TARGET_NR_mknod:
8881         if (!(p = lock_user_string(arg1)))
8882             return -TARGET_EFAULT;
8883         ret = get_errno(mknod(p, arg2, arg3));
8884         unlock_user(p, arg1, 0);
8885         return ret;
8886 #endif
8887 #if defined(TARGET_NR_mknodat)
8888     case TARGET_NR_mknodat:
8889         if (!(p = lock_user_string(arg2)))
8890             return -TARGET_EFAULT;
8891         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8892         unlock_user(p, arg2, 0);
8893         return ret;
8894 #endif
8895 #ifdef TARGET_NR_chmod
8896     case TARGET_NR_chmod:
8897         if (!(p = lock_user_string(arg1)))
8898             return -TARGET_EFAULT;
8899         ret = get_errno(chmod(p, arg2));
8900         unlock_user(p, arg1, 0);
8901         return ret;
8902 #endif
8903 #ifdef TARGET_NR_lseek
8904     case TARGET_NR_lseek:
8905         return get_errno(lseek(arg1, arg2, arg3));
8906 #endif
8907 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8908     /* Alpha specific */
8909     case TARGET_NR_getxpid:
8910         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8911         return get_errno(getpid());
8912 #endif
8913 #ifdef TARGET_NR_getpid
8914     case TARGET_NR_getpid:
8915         return get_errno(getpid());
8916 #endif
8917     case TARGET_NR_mount:
8918         {
8919             /* need to look at the data field */
8920             void *p2, *p3;
8921 
8922             if (arg1) {
8923                 p = lock_user_string(arg1);
8924                 if (!p) {
8925                     return -TARGET_EFAULT;
8926                 }
8927             } else {
8928                 p = NULL;
8929             }
8930 
8931             p2 = lock_user_string(arg2);
8932             if (!p2) {
8933                 if (arg1) {
8934                     unlock_user(p, arg1, 0);
8935                 }
8936                 return -TARGET_EFAULT;
8937             }
8938 
8939             if (arg3) {
8940                 p3 = lock_user_string(arg3);
8941                 if (!p3) {
8942                     if (arg1) {
8943                         unlock_user(p, arg1, 0);
8944                     }
8945                     unlock_user(p2, arg2, 0);
8946                     return -TARGET_EFAULT;
8947                 }
8948             } else {
8949                 p3 = NULL;
8950             }
8951 
8952             /* FIXME - arg5 should be locked, but it isn't clear how to
8953              * do that since it's not guaranteed to be a NULL-terminated
8954              * string.
8955              */
8956             if (!arg5) {
8957                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8958             } else {
8959                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8960             }
8961             ret = get_errno(ret);
8962 
8963             if (arg1) {
8964                 unlock_user(p, arg1, 0);
8965             }
8966             unlock_user(p2, arg2, 0);
8967             if (arg3) {
8968                 unlock_user(p3, arg3, 0);
8969             }
8970         }
8971         return ret;
8972 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8973 #if defined(TARGET_NR_umount)
8974     case TARGET_NR_umount:
8975 #endif
8976 #if defined(TARGET_NR_oldumount)
8977     case TARGET_NR_oldumount:
8978 #endif
8979         if (!(p = lock_user_string(arg1)))
8980             return -TARGET_EFAULT;
8981         ret = get_errno(umount(p));
8982         unlock_user(p, arg1, 0);
8983         return ret;
8984 #endif
8985 #ifdef TARGET_NR_stime /* not on alpha */
8986     case TARGET_NR_stime:
8987         {
8988             struct timespec ts;
8989             ts.tv_nsec = 0;
8990             if (get_user_sal(ts.tv_sec, arg1)) {
8991                 return -TARGET_EFAULT;
8992             }
8993             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8994         }
8995 #endif
8996 #ifdef TARGET_NR_alarm /* not on alpha */
8997     case TARGET_NR_alarm:
8998         return alarm(arg1);
8999 #endif
9000 #ifdef TARGET_NR_pause /* not on alpha */
9001     case TARGET_NR_pause:
9002         if (!block_signals()) {
9003             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9004         }
9005         return -TARGET_EINTR;
9006 #endif
9007 #ifdef TARGET_NR_utime
9008     case TARGET_NR_utime:
9009         {
9010             struct utimbuf tbuf, *host_tbuf;
9011             struct target_utimbuf *target_tbuf;
9012             if (arg2) {
9013                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9014                     return -TARGET_EFAULT;
9015                 tbuf.actime = tswapal(target_tbuf->actime);
9016                 tbuf.modtime = tswapal(target_tbuf->modtime);
9017                 unlock_user_struct(target_tbuf, arg2, 0);
9018                 host_tbuf = &tbuf;
9019             } else {
9020                 host_tbuf = NULL;
9021             }
9022             if (!(p = lock_user_string(arg1)))
9023                 return -TARGET_EFAULT;
9024             ret = get_errno(utime(p, host_tbuf));
9025             unlock_user(p, arg1, 0);
9026         }
9027         return ret;
9028 #endif
9029 #ifdef TARGET_NR_utimes
9030     case TARGET_NR_utimes:
9031         {
9032             struct timeval *tvp, tv[2];
9033             if (arg2) {
9034                 if (copy_from_user_timeval(&tv[0], arg2)
9035                     || copy_from_user_timeval(&tv[1],
9036                                               arg2 + sizeof(struct target_timeval)))
9037                     return -TARGET_EFAULT;
9038                 tvp = tv;
9039             } else {
9040                 tvp = NULL;
9041             }
9042             if (!(p = lock_user_string(arg1)))
9043                 return -TARGET_EFAULT;
9044             ret = get_errno(utimes(p, tvp));
9045             unlock_user(p, arg1, 0);
9046         }
9047         return ret;
9048 #endif
9049 #if defined(TARGET_NR_futimesat)
9050     case TARGET_NR_futimesat:
9051         {
9052             struct timeval *tvp, tv[2];
9053             if (arg3) {
9054                 if (copy_from_user_timeval(&tv[0], arg3)
9055                     || copy_from_user_timeval(&tv[1],
9056                                               arg3 + sizeof(struct target_timeval)))
9057                     return -TARGET_EFAULT;
9058                 tvp = tv;
9059             } else {
9060                 tvp = NULL;
9061             }
9062             if (!(p = lock_user_string(arg2))) {
9063                 return -TARGET_EFAULT;
9064             }
9065             ret = get_errno(futimesat(arg1, path(p), tvp));
9066             unlock_user(p, arg2, 0);
9067         }
9068         return ret;
9069 #endif
9070 #ifdef TARGET_NR_access
9071     case TARGET_NR_access:
9072         if (!(p = lock_user_string(arg1))) {
9073             return -TARGET_EFAULT;
9074         }
9075         ret = get_errno(access(path(p), arg2));
9076         unlock_user(p, arg1, 0);
9077         return ret;
9078 #endif
9079 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9080     case TARGET_NR_faccessat:
9081         if (!(p = lock_user_string(arg2))) {
9082             return -TARGET_EFAULT;
9083         }
9084         ret = get_errno(faccessat(arg1, p, arg3, 0));
9085         unlock_user(p, arg2, 0);
9086         return ret;
9087 #endif
9088 #ifdef TARGET_NR_nice /* not on alpha */
9089     case TARGET_NR_nice:
9090         return get_errno(nice(arg1));
9091 #endif
9092     case TARGET_NR_sync:
9093         sync();
9094         return 0;
9095 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9096     case TARGET_NR_syncfs:
9097         return get_errno(syncfs(arg1));
9098 #endif
9099     case TARGET_NR_kill:
9100         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9101 #ifdef TARGET_NR_rename
9102     case TARGET_NR_rename:
9103         {
9104             void *p2;
9105             p = lock_user_string(arg1);
9106             p2 = lock_user_string(arg2);
9107             if (!p || !p2)
9108                 ret = -TARGET_EFAULT;
9109             else
9110                 ret = get_errno(rename(p, p2));
9111             unlock_user(p2, arg2, 0);
9112             unlock_user(p, arg1, 0);
9113         }
9114         return ret;
9115 #endif
9116 #if defined(TARGET_NR_renameat)
9117     case TARGET_NR_renameat:
9118         {
9119             void *p2;
9120             p  = lock_user_string(arg2);
9121             p2 = lock_user_string(arg4);
9122             if (!p || !p2)
9123                 ret = -TARGET_EFAULT;
9124             else
9125                 ret = get_errno(renameat(arg1, p, arg3, p2));
9126             unlock_user(p2, arg4, 0);
9127             unlock_user(p, arg2, 0);
9128         }
9129         return ret;
9130 #endif
9131 #if defined(TARGET_NR_renameat2)
9132     case TARGET_NR_renameat2:
9133         {
9134             void *p2;
9135             p  = lock_user_string(arg2);
9136             p2 = lock_user_string(arg4);
9137             if (!p || !p2) {
9138                 ret = -TARGET_EFAULT;
9139             } else {
9140                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9141             }
9142             unlock_user(p2, arg4, 0);
9143             unlock_user(p, arg2, 0);
9144         }
9145         return ret;
9146 #endif
9147 #ifdef TARGET_NR_mkdir
9148     case TARGET_NR_mkdir:
9149         if (!(p = lock_user_string(arg1)))
9150             return -TARGET_EFAULT;
9151         ret = get_errno(mkdir(p, arg2));
9152         unlock_user(p, arg1, 0);
9153         return ret;
9154 #endif
9155 #if defined(TARGET_NR_mkdirat)
9156     case TARGET_NR_mkdirat:
9157         if (!(p = lock_user_string(arg2)))
9158             return -TARGET_EFAULT;
9159         ret = get_errno(mkdirat(arg1, p, arg3));
9160         unlock_user(p, arg2, 0);
9161         return ret;
9162 #endif
9163 #ifdef TARGET_NR_rmdir
9164     case TARGET_NR_rmdir:
9165         if (!(p = lock_user_string(arg1)))
9166             return -TARGET_EFAULT;
9167         ret = get_errno(rmdir(p));
9168         unlock_user(p, arg1, 0);
9169         return ret;
9170 #endif
9171     case TARGET_NR_dup:
9172         ret = get_errno(dup(arg1));
9173         if (ret >= 0) {
9174             fd_trans_dup(arg1, ret);
9175         }
9176         return ret;
9177 #ifdef TARGET_NR_pipe
9178     case TARGET_NR_pipe:
9179         return do_pipe(cpu_env, arg1, 0, 0);
9180 #endif
9181 #ifdef TARGET_NR_pipe2
9182     case TARGET_NR_pipe2:
9183         return do_pipe(cpu_env, arg1,
9184                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9185 #endif
9186     case TARGET_NR_times:
9187         {
9188             struct target_tms *tmsp;
9189             struct tms tms;
9190             ret = get_errno(times(&tms));
9191             if (arg1) {
9192                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9193                 if (!tmsp)
9194                     return -TARGET_EFAULT;
9195                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9196                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9197                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9198                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9199             }
9200             if (!is_error(ret))
9201                 ret = host_to_target_clock_t(ret);
9202         }
9203         return ret;
9204     case TARGET_NR_acct:
9205         if (arg1 == 0) {
9206             ret = get_errno(acct(NULL));
9207         } else {
9208             if (!(p = lock_user_string(arg1))) {
9209                 return -TARGET_EFAULT;
9210             }
9211             ret = get_errno(acct(path(p)));
9212             unlock_user(p, arg1, 0);
9213         }
9214         return ret;
9215 #ifdef TARGET_NR_umount2
9216     case TARGET_NR_umount2:
9217         if (!(p = lock_user_string(arg1)))
9218             return -TARGET_EFAULT;
9219         ret = get_errno(umount2(p, arg2));
9220         unlock_user(p, arg1, 0);
9221         return ret;
9222 #endif
9223     case TARGET_NR_ioctl:
9224         return do_ioctl(arg1, arg2, arg3);
9225 #ifdef TARGET_NR_fcntl
9226     case TARGET_NR_fcntl:
9227         return do_fcntl(arg1, arg2, arg3);
9228 #endif
9229     case TARGET_NR_setpgid:
9230         return get_errno(setpgid(arg1, arg2));
9231     case TARGET_NR_umask:
9232         return get_errno(umask(arg1));
9233     case TARGET_NR_chroot:
9234         if (!(p = lock_user_string(arg1)))
9235             return -TARGET_EFAULT;
9236         ret = get_errno(chroot(p));
9237         unlock_user(p, arg1, 0);
9238         return ret;
9239 #ifdef TARGET_NR_dup2
9240     case TARGET_NR_dup2:
9241         ret = get_errno(dup2(arg1, arg2));
9242         if (ret >= 0) {
9243             fd_trans_dup(arg1, arg2);
9244         }
9245         return ret;
9246 #endif
9247 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9248     case TARGET_NR_dup3:
9249     {
9250         int host_flags;
9251 
9252         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9253             return -EINVAL;
9254         }
9255         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9256         ret = get_errno(dup3(arg1, arg2, host_flags));
9257         if (ret >= 0) {
9258             fd_trans_dup(arg1, arg2);
9259         }
9260         return ret;
9261     }
9262 #endif
9263 #ifdef TARGET_NR_getppid /* not on alpha */
9264     case TARGET_NR_getppid:
9265         return get_errno(getppid());
9266 #endif
9267 #ifdef TARGET_NR_getpgrp
9268     case TARGET_NR_getpgrp:
9269         return get_errno(getpgrp());
9270 #endif
9271     case TARGET_NR_setsid:
9272         return get_errno(setsid());
9273 #ifdef TARGET_NR_sigaction
9274     case TARGET_NR_sigaction:
9275         {
9276 #if defined(TARGET_MIPS)
9277 	    struct target_sigaction act, oact, *pact, *old_act;
9278 
9279 	    if (arg2) {
9280                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9281                     return -TARGET_EFAULT;
9282 		act._sa_handler = old_act->_sa_handler;
9283 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9284 		act.sa_flags = old_act->sa_flags;
9285 		unlock_user_struct(old_act, arg2, 0);
9286 		pact = &act;
9287 	    } else {
9288 		pact = NULL;
9289 	    }
9290 
9291         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9292 
9293 	    if (!is_error(ret) && arg3) {
9294                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9295                     return -TARGET_EFAULT;
9296 		old_act->_sa_handler = oact._sa_handler;
9297 		old_act->sa_flags = oact.sa_flags;
9298 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9299 		old_act->sa_mask.sig[1] = 0;
9300 		old_act->sa_mask.sig[2] = 0;
9301 		old_act->sa_mask.sig[3] = 0;
9302 		unlock_user_struct(old_act, arg3, 1);
9303 	    }
9304 #else
9305             struct target_old_sigaction *old_act;
9306             struct target_sigaction act, oact, *pact;
9307             if (arg2) {
9308                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9309                     return -TARGET_EFAULT;
9310                 act._sa_handler = old_act->_sa_handler;
9311                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9312                 act.sa_flags = old_act->sa_flags;
9313 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9314                 act.sa_restorer = old_act->sa_restorer;
9315 #endif
9316                 unlock_user_struct(old_act, arg2, 0);
9317                 pact = &act;
9318             } else {
9319                 pact = NULL;
9320             }
9321             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9322             if (!is_error(ret) && arg3) {
9323                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9324                     return -TARGET_EFAULT;
9325                 old_act->_sa_handler = oact._sa_handler;
9326                 old_act->sa_mask = oact.sa_mask.sig[0];
9327                 old_act->sa_flags = oact.sa_flags;
9328 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9329                 old_act->sa_restorer = oact.sa_restorer;
9330 #endif
9331                 unlock_user_struct(old_act, arg3, 1);
9332             }
9333 #endif
9334         }
9335         return ret;
9336 #endif
9337     case TARGET_NR_rt_sigaction:
9338         {
9339             /*
9340              * For Alpha and SPARC this is a 5 argument syscall, with
9341              * a 'restorer' parameter which must be copied into the
9342              * sa_restorer field of the sigaction struct.
9343              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9344              * and arg5 is the sigsetsize.
9345              */
9346 #if defined(TARGET_ALPHA)
9347             target_ulong sigsetsize = arg4;
9348             target_ulong restorer = arg5;
9349 #elif defined(TARGET_SPARC)
9350             target_ulong restorer = arg4;
9351             target_ulong sigsetsize = arg5;
9352 #else
9353             target_ulong sigsetsize = arg4;
9354             target_ulong restorer = 0;
9355 #endif
9356             struct target_sigaction *act = NULL;
9357             struct target_sigaction *oact = NULL;
9358 
9359             if (sigsetsize != sizeof(target_sigset_t)) {
9360                 return -TARGET_EINVAL;
9361             }
9362             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9363                 return -TARGET_EFAULT;
9364             }
9365             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9366                 ret = -TARGET_EFAULT;
9367             } else {
9368                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9369                 if (oact) {
9370                     unlock_user_struct(oact, arg3, 1);
9371                 }
9372             }
9373             if (act) {
9374                 unlock_user_struct(act, arg2, 0);
9375             }
9376         }
9377         return ret;
9378 #ifdef TARGET_NR_sgetmask /* not on alpha */
9379     case TARGET_NR_sgetmask:
9380         {
9381             sigset_t cur_set;
9382             abi_ulong target_set;
9383             ret = do_sigprocmask(0, NULL, &cur_set);
9384             if (!ret) {
9385                 host_to_target_old_sigset(&target_set, &cur_set);
9386                 ret = target_set;
9387             }
9388         }
9389         return ret;
9390 #endif
9391 #ifdef TARGET_NR_ssetmask /* not on alpha */
9392     case TARGET_NR_ssetmask:
9393         {
9394             sigset_t set, oset;
9395             abi_ulong target_set = arg1;
9396             target_to_host_old_sigset(&set, &target_set);
9397             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9398             if (!ret) {
9399                 host_to_target_old_sigset(&target_set, &oset);
9400                 ret = target_set;
9401             }
9402         }
9403         return ret;
9404 #endif
9405 #ifdef TARGET_NR_sigprocmask
9406     case TARGET_NR_sigprocmask:
9407         {
9408 #if defined(TARGET_ALPHA)
9409             sigset_t set, oldset;
9410             abi_ulong mask;
9411             int how;
9412 
9413             switch (arg1) {
9414             case TARGET_SIG_BLOCK:
9415                 how = SIG_BLOCK;
9416                 break;
9417             case TARGET_SIG_UNBLOCK:
9418                 how = SIG_UNBLOCK;
9419                 break;
9420             case TARGET_SIG_SETMASK:
9421                 how = SIG_SETMASK;
9422                 break;
9423             default:
9424                 return -TARGET_EINVAL;
9425             }
9426             mask = arg2;
9427             target_to_host_old_sigset(&set, &mask);
9428 
9429             ret = do_sigprocmask(how, &set, &oldset);
9430             if (!is_error(ret)) {
9431                 host_to_target_old_sigset(&mask, &oldset);
9432                 ret = mask;
9433                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9434             }
9435 #else
9436             sigset_t set, oldset, *set_ptr;
9437             int how;
9438 
9439             if (arg2) {
9440                 switch (arg1) {
9441                 case TARGET_SIG_BLOCK:
9442                     how = SIG_BLOCK;
9443                     break;
9444                 case TARGET_SIG_UNBLOCK:
9445                     how = SIG_UNBLOCK;
9446                     break;
9447                 case TARGET_SIG_SETMASK:
9448                     how = SIG_SETMASK;
9449                     break;
9450                 default:
9451                     return -TARGET_EINVAL;
9452                 }
9453                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9454                     return -TARGET_EFAULT;
9455                 target_to_host_old_sigset(&set, p);
9456                 unlock_user(p, arg2, 0);
9457                 set_ptr = &set;
9458             } else {
9459                 how = 0;
9460                 set_ptr = NULL;
9461             }
9462             ret = do_sigprocmask(how, set_ptr, &oldset);
9463             if (!is_error(ret) && arg3) {
9464                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9465                     return -TARGET_EFAULT;
9466                 host_to_target_old_sigset(p, &oldset);
9467                 unlock_user(p, arg3, sizeof(target_sigset_t));
9468             }
9469 #endif
9470         }
9471         return ret;
9472 #endif
9473     case TARGET_NR_rt_sigprocmask:
9474         {
9475             int how = arg1;
9476             sigset_t set, oldset, *set_ptr;
9477 
9478             if (arg4 != sizeof(target_sigset_t)) {
9479                 return -TARGET_EINVAL;
9480             }
9481 
9482             if (arg2) {
9483                 switch(how) {
9484                 case TARGET_SIG_BLOCK:
9485                     how = SIG_BLOCK;
9486                     break;
9487                 case TARGET_SIG_UNBLOCK:
9488                     how = SIG_UNBLOCK;
9489                     break;
9490                 case TARGET_SIG_SETMASK:
9491                     how = SIG_SETMASK;
9492                     break;
9493                 default:
9494                     return -TARGET_EINVAL;
9495                 }
9496                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9497                     return -TARGET_EFAULT;
9498                 target_to_host_sigset(&set, p);
9499                 unlock_user(p, arg2, 0);
9500                 set_ptr = &set;
9501             } else {
9502                 how = 0;
9503                 set_ptr = NULL;
9504             }
9505             ret = do_sigprocmask(how, set_ptr, &oldset);
9506             if (!is_error(ret) && arg3) {
9507                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9508                     return -TARGET_EFAULT;
9509                 host_to_target_sigset(p, &oldset);
9510                 unlock_user(p, arg3, sizeof(target_sigset_t));
9511             }
9512         }
9513         return ret;
9514 #ifdef TARGET_NR_sigpending
9515     case TARGET_NR_sigpending:
9516         {
9517             sigset_t set;
9518             ret = get_errno(sigpending(&set));
9519             if (!is_error(ret)) {
9520                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9521                     return -TARGET_EFAULT;
9522                 host_to_target_old_sigset(p, &set);
9523                 unlock_user(p, arg1, sizeof(target_sigset_t));
9524             }
9525         }
9526         return ret;
9527 #endif
9528     case TARGET_NR_rt_sigpending:
9529         {
9530             sigset_t set;
9531 
9532             /* Yes, this check is >, not != like most. We follow the kernel's
9533              * logic and it does it like this because it implements
9534              * NR_sigpending through the same code path, and in that case
9535              * the old_sigset_t is smaller in size.
9536              */
9537             if (arg2 > sizeof(target_sigset_t)) {
9538                 return -TARGET_EINVAL;
9539             }
9540 
9541             ret = get_errno(sigpending(&set));
9542             if (!is_error(ret)) {
9543                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9544                     return -TARGET_EFAULT;
9545                 host_to_target_sigset(p, &set);
9546                 unlock_user(p, arg1, sizeof(target_sigset_t));
9547             }
9548         }
9549         return ret;
9550 #ifdef TARGET_NR_sigsuspend
9551     case TARGET_NR_sigsuspend:
9552         {
9553             TaskState *ts = cpu->opaque;
9554 #if defined(TARGET_ALPHA)
9555             abi_ulong mask = arg1;
9556             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9557 #else
9558             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9559                 return -TARGET_EFAULT;
9560             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9561             unlock_user(p, arg1, 0);
9562 #endif
9563             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9564                                                SIGSET_T_SIZE));
9565             if (ret != -QEMU_ERESTARTSYS) {
9566                 ts->in_sigsuspend = 1;
9567             }
9568         }
9569         return ret;
9570 #endif
9571     case TARGET_NR_rt_sigsuspend:
9572         {
9573             TaskState *ts = cpu->opaque;
9574 
9575             if (arg2 != sizeof(target_sigset_t)) {
9576                 return -TARGET_EINVAL;
9577             }
9578             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9579                 return -TARGET_EFAULT;
9580             target_to_host_sigset(&ts->sigsuspend_mask, p);
9581             unlock_user(p, arg1, 0);
9582             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9583                                                SIGSET_T_SIZE));
9584             if (ret != -QEMU_ERESTARTSYS) {
9585                 ts->in_sigsuspend = 1;
9586             }
9587         }
9588         return ret;
9589 #ifdef TARGET_NR_rt_sigtimedwait
9590     case TARGET_NR_rt_sigtimedwait:
9591         {
9592             sigset_t set;
9593             struct timespec uts, *puts;
9594             siginfo_t uinfo;
9595 
9596             if (arg4 != sizeof(target_sigset_t)) {
9597                 return -TARGET_EINVAL;
9598             }
9599 
9600             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9601                 return -TARGET_EFAULT;
9602             target_to_host_sigset(&set, p);
9603             unlock_user(p, arg1, 0);
9604             if (arg3) {
9605                 puts = &uts;
9606                 if (target_to_host_timespec(puts, arg3)) {
9607                     return -TARGET_EFAULT;
9608                 }
9609             } else {
9610                 puts = NULL;
9611             }
9612             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9613                                                  SIGSET_T_SIZE));
9614             if (!is_error(ret)) {
9615                 if (arg2) {
9616                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9617                                   0);
9618                     if (!p) {
9619                         return -TARGET_EFAULT;
9620                     }
9621                     host_to_target_siginfo(p, &uinfo);
9622                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9623                 }
9624                 ret = host_to_target_signal(ret);
9625             }
9626         }
9627         return ret;
9628 #endif
9629 #ifdef TARGET_NR_rt_sigtimedwait_time64
9630     case TARGET_NR_rt_sigtimedwait_time64:
9631         {
9632             sigset_t set;
9633             struct timespec uts, *puts;
9634             siginfo_t uinfo;
9635 
9636             if (arg4 != sizeof(target_sigset_t)) {
9637                 return -TARGET_EINVAL;
9638             }
9639 
9640             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9641             if (!p) {
9642                 return -TARGET_EFAULT;
9643             }
9644             target_to_host_sigset(&set, p);
9645             unlock_user(p, arg1, 0);
9646             if (arg3) {
9647                 puts = &uts;
9648                 if (target_to_host_timespec64(puts, arg3)) {
9649                     return -TARGET_EFAULT;
9650                 }
9651             } else {
9652                 puts = NULL;
9653             }
9654             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9655                                                  SIGSET_T_SIZE));
9656             if (!is_error(ret)) {
9657                 if (arg2) {
9658                     p = lock_user(VERIFY_WRITE, arg2,
9659                                   sizeof(target_siginfo_t), 0);
9660                     if (!p) {
9661                         return -TARGET_EFAULT;
9662                     }
9663                     host_to_target_siginfo(p, &uinfo);
9664                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9665                 }
9666                 ret = host_to_target_signal(ret);
9667             }
9668         }
9669         return ret;
9670 #endif
9671     case TARGET_NR_rt_sigqueueinfo:
9672         {
9673             siginfo_t uinfo;
9674 
9675             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9676             if (!p) {
9677                 return -TARGET_EFAULT;
9678             }
9679             target_to_host_siginfo(&uinfo, p);
9680             unlock_user(p, arg3, 0);
9681             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9682         }
9683         return ret;
9684     case TARGET_NR_rt_tgsigqueueinfo:
9685         {
9686             siginfo_t uinfo;
9687 
9688             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9689             if (!p) {
9690                 return -TARGET_EFAULT;
9691             }
9692             target_to_host_siginfo(&uinfo, p);
9693             unlock_user(p, arg4, 0);
9694             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9695         }
9696         return ret;
9697 #ifdef TARGET_NR_sigreturn
9698     case TARGET_NR_sigreturn:
9699         if (block_signals()) {
9700             return -QEMU_ERESTARTSYS;
9701         }
9702         return do_sigreturn(cpu_env);
9703 #endif
9704     case TARGET_NR_rt_sigreturn:
9705         if (block_signals()) {
9706             return -QEMU_ERESTARTSYS;
9707         }
9708         return do_rt_sigreturn(cpu_env);
9709     case TARGET_NR_sethostname:
9710         if (!(p = lock_user_string(arg1)))
9711             return -TARGET_EFAULT;
9712         ret = get_errno(sethostname(p, arg2));
9713         unlock_user(p, arg1, 0);
9714         return ret;
9715 #ifdef TARGET_NR_setrlimit
9716     case TARGET_NR_setrlimit:
9717         {
9718             int resource = target_to_host_resource(arg1);
9719             struct target_rlimit *target_rlim;
9720             struct rlimit rlim;
9721             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9722                 return -TARGET_EFAULT;
9723             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9724             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9725             unlock_user_struct(target_rlim, arg2, 0);
9726             /*
9727              * If we just passed through resource limit settings for memory then
9728              * they would also apply to QEMU's own allocations, and QEMU will
9729              * crash or hang or die if its allocations fail. Ideally we would
9730              * track the guest allocations in QEMU and apply the limits ourselves.
9731              * For now, just tell the guest the call succeeded but don't actually
9732              * limit anything.
9733              */
9734             if (resource != RLIMIT_AS &&
9735                 resource != RLIMIT_DATA &&
9736                 resource != RLIMIT_STACK) {
9737                 return get_errno(setrlimit(resource, &rlim));
9738             } else {
9739                 return 0;
9740             }
9741         }
9742 #endif
9743 #ifdef TARGET_NR_getrlimit
9744     case TARGET_NR_getrlimit:
9745         {
9746             int resource = target_to_host_resource(arg1);
9747             struct target_rlimit *target_rlim;
9748             struct rlimit rlim;
9749 
9750             ret = get_errno(getrlimit(resource, &rlim));
9751             if (!is_error(ret)) {
9752                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9753                     return -TARGET_EFAULT;
9754                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9755                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9756                 unlock_user_struct(target_rlim, arg2, 1);
9757             }
9758         }
9759         return ret;
9760 #endif
9761     case TARGET_NR_getrusage:
9762         {
9763             struct rusage rusage;
9764             ret = get_errno(getrusage(arg1, &rusage));
9765             if (!is_error(ret)) {
9766                 ret = host_to_target_rusage(arg2, &rusage);
9767             }
9768         }
9769         return ret;
9770 #if defined(TARGET_NR_gettimeofday)
9771     case TARGET_NR_gettimeofday:
9772         {
9773             struct timeval tv;
9774             struct timezone tz;
9775 
9776             ret = get_errno(gettimeofday(&tv, &tz));
9777             if (!is_error(ret)) {
9778                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9779                     return -TARGET_EFAULT;
9780                 }
9781                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9782                     return -TARGET_EFAULT;
9783                 }
9784             }
9785         }
9786         return ret;
9787 #endif
9788 #if defined(TARGET_NR_settimeofday)
9789     case TARGET_NR_settimeofday:
9790         {
9791             struct timeval tv, *ptv = NULL;
9792             struct timezone tz, *ptz = NULL;
9793 
9794             if (arg1) {
9795                 if (copy_from_user_timeval(&tv, arg1)) {
9796                     return -TARGET_EFAULT;
9797                 }
9798                 ptv = &tv;
9799             }
9800 
9801             if (arg2) {
9802                 if (copy_from_user_timezone(&tz, arg2)) {
9803                     return -TARGET_EFAULT;
9804                 }
9805                 ptz = &tz;
9806             }
9807 
9808             return get_errno(settimeofday(ptv, ptz));
9809         }
9810 #endif
9811 #if defined(TARGET_NR_select)
9812     case TARGET_NR_select:
9813 #if defined(TARGET_WANT_NI_OLD_SELECT)
9814         /* some architectures used to have old_select here
9815          * but now ENOSYS it.
9816          */
9817         ret = -TARGET_ENOSYS;
9818 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9819         ret = do_old_select(arg1);
9820 #else
9821         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9822 #endif
9823         return ret;
9824 #endif
9825 #ifdef TARGET_NR_pselect6
9826     case TARGET_NR_pselect6:
9827         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9828 #endif
9829 #ifdef TARGET_NR_pselect6_time64
9830     case TARGET_NR_pselect6_time64:
9831         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9832 #endif
9833 #ifdef TARGET_NR_symlink
9834     case TARGET_NR_symlink:
9835         {
9836             void *p2;
9837             p = lock_user_string(arg1);
9838             p2 = lock_user_string(arg2);
9839             if (!p || !p2)
9840                 ret = -TARGET_EFAULT;
9841             else
9842                 ret = get_errno(symlink(p, p2));
9843             unlock_user(p2, arg2, 0);
9844             unlock_user(p, arg1, 0);
9845         }
9846         return ret;
9847 #endif
9848 #if defined(TARGET_NR_symlinkat)
9849     case TARGET_NR_symlinkat:
9850         {
9851             void *p2;
9852             p  = lock_user_string(arg1);
9853             p2 = lock_user_string(arg3);
9854             if (!p || !p2)
9855                 ret = -TARGET_EFAULT;
9856             else
9857                 ret = get_errno(symlinkat(p, arg2, p2));
9858             unlock_user(p2, arg3, 0);
9859             unlock_user(p, arg1, 0);
9860         }
9861         return ret;
9862 #endif
9863 #ifdef TARGET_NR_readlink
9864     case TARGET_NR_readlink:
9865         {
9866             void *p2;
9867             p = lock_user_string(arg1);
9868             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9869             if (!p || !p2) {
9870                 ret = -TARGET_EFAULT;
9871             } else if (!arg3) {
9872                 /* Short circuit this for the magic exe check. */
9873                 ret = -TARGET_EINVAL;
9874             } else if (is_proc_myself((const char *)p, "exe")) {
9875                 char real[PATH_MAX], *temp;
9876                 temp = realpath(exec_path, real);
9877                 /* Return value is # of bytes that we wrote to the buffer. */
9878                 if (temp == NULL) {
9879                     ret = get_errno(-1);
9880                 } else {
9881                     /* Don't worry about sign mismatch as earlier mapping
9882                      * logic would have thrown a bad address error. */
9883                     ret = MIN(strlen(real), arg3);
9884                     /* We cannot NUL terminate the string. */
9885                     memcpy(p2, real, ret);
9886                 }
9887             } else {
9888                 ret = get_errno(readlink(path(p), p2, arg3));
9889             }
9890             unlock_user(p2, arg2, ret);
9891             unlock_user(p, arg1, 0);
9892         }
9893         return ret;
9894 #endif
9895 #if defined(TARGET_NR_readlinkat)
9896     case TARGET_NR_readlinkat:
9897         {
9898             void *p2;
9899             p  = lock_user_string(arg2);
9900             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9901             if (!p || !p2) {
9902                 ret = -TARGET_EFAULT;
9903             } else if (is_proc_myself((const char *)p, "exe")) {
9904                 char real[PATH_MAX], *temp;
9905                 temp = realpath(exec_path, real);
9906                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9907                 snprintf((char *)p2, arg4, "%s", real);
9908             } else {
9909                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9910             }
9911             unlock_user(p2, arg3, ret);
9912             unlock_user(p, arg2, 0);
9913         }
9914         return ret;
9915 #endif
9916 #ifdef TARGET_NR_swapon
9917     case TARGET_NR_swapon:
9918         if (!(p = lock_user_string(arg1)))
9919             return -TARGET_EFAULT;
9920         ret = get_errno(swapon(p, arg2));
9921         unlock_user(p, arg1, 0);
9922         return ret;
9923 #endif
9924     case TARGET_NR_reboot:
9925         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9926            /* arg4 must be ignored in all other cases */
9927            p = lock_user_string(arg4);
9928            if (!p) {
9929                return -TARGET_EFAULT;
9930            }
9931            ret = get_errno(reboot(arg1, arg2, arg3, p));
9932            unlock_user(p, arg4, 0);
9933         } else {
9934            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9935         }
9936         return ret;
9937 #ifdef TARGET_NR_mmap
9938     case TARGET_NR_mmap:
9939 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9940     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9941     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9942     || defined(TARGET_S390X)
9943         {
9944             abi_ulong *v;
9945             abi_ulong v1, v2, v3, v4, v5, v6;
9946             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9947                 return -TARGET_EFAULT;
9948             v1 = tswapal(v[0]);
9949             v2 = tswapal(v[1]);
9950             v3 = tswapal(v[2]);
9951             v4 = tswapal(v[3]);
9952             v5 = tswapal(v[4]);
9953             v6 = tswapal(v[5]);
9954             unlock_user(v, arg1, 0);
9955             ret = get_errno(target_mmap(v1, v2, v3,
9956                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9957                                         v5, v6));
9958         }
9959 #else
9960         /* mmap pointers are always untagged */
9961         ret = get_errno(target_mmap(arg1, arg2, arg3,
9962                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9963                                     arg5,
9964                                     arg6));
9965 #endif
9966         return ret;
9967 #endif
9968 #ifdef TARGET_NR_mmap2
9969     case TARGET_NR_mmap2:
9970 #ifndef MMAP_SHIFT
9971 #define MMAP_SHIFT 12
9972 #endif
9973         ret = target_mmap(arg1, arg2, arg3,
9974                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9975                           arg5, arg6 << MMAP_SHIFT);
9976         return get_errno(ret);
9977 #endif
9978     case TARGET_NR_munmap:
9979         arg1 = cpu_untagged_addr(cpu, arg1);
9980         return get_errno(target_munmap(arg1, arg2));
9981     case TARGET_NR_mprotect:
9982         arg1 = cpu_untagged_addr(cpu, arg1);
9983         {
9984             TaskState *ts = cpu->opaque;
9985             /* Special hack to detect libc making the stack executable.  */
9986             if ((arg3 & PROT_GROWSDOWN)
9987                 && arg1 >= ts->info->stack_limit
9988                 && arg1 <= ts->info->start_stack) {
9989                 arg3 &= ~PROT_GROWSDOWN;
9990                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9991                 arg1 = ts->info->stack_limit;
9992             }
9993         }
9994         return get_errno(target_mprotect(arg1, arg2, arg3));
9995 #ifdef TARGET_NR_mremap
9996     case TARGET_NR_mremap:
9997         arg1 = cpu_untagged_addr(cpu, arg1);
9998         /* mremap new_addr (arg5) is always untagged */
9999         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10000 #endif
10001         /* ??? msync/mlock/munlock are broken for softmmu.  */
10002 #ifdef TARGET_NR_msync
10003     case TARGET_NR_msync:
10004         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10005 #endif
10006 #ifdef TARGET_NR_mlock
10007     case TARGET_NR_mlock:
10008         return get_errno(mlock(g2h(cpu, arg1), arg2));
10009 #endif
10010 #ifdef TARGET_NR_munlock
10011     case TARGET_NR_munlock:
10012         return get_errno(munlock(g2h(cpu, arg1), arg2));
10013 #endif
10014 #ifdef TARGET_NR_mlockall
10015     case TARGET_NR_mlockall:
10016         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10017 #endif
10018 #ifdef TARGET_NR_munlockall
10019     case TARGET_NR_munlockall:
10020         return get_errno(munlockall());
10021 #endif
10022 #ifdef TARGET_NR_truncate
10023     case TARGET_NR_truncate:
10024         if (!(p = lock_user_string(arg1)))
10025             return -TARGET_EFAULT;
10026         ret = get_errno(truncate(p, arg2));
10027         unlock_user(p, arg1, 0);
10028         return ret;
10029 #endif
10030 #ifdef TARGET_NR_ftruncate
10031     case TARGET_NR_ftruncate:
10032         return get_errno(ftruncate(arg1, arg2));
10033 #endif
10034     case TARGET_NR_fchmod:
10035         return get_errno(fchmod(arg1, arg2));
10036 #if defined(TARGET_NR_fchmodat)
10037     case TARGET_NR_fchmodat:
10038         if (!(p = lock_user_string(arg2)))
10039             return -TARGET_EFAULT;
10040         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10041         unlock_user(p, arg2, 0);
10042         return ret;
10043 #endif
10044     case TARGET_NR_getpriority:
10045         /* Note that negative values are valid for getpriority, so we must
10046            differentiate based on errno settings.  */
10047         errno = 0;
10048         ret = getpriority(arg1, arg2);
10049         if (ret == -1 && errno != 0) {
10050             return -host_to_target_errno(errno);
10051         }
10052 #ifdef TARGET_ALPHA
10053         /* Return value is the unbiased priority.  Signal no error.  */
10054         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
10055 #else
10056         /* Return value is a biased priority to avoid negative numbers.  */
10057         ret = 20 - ret;
10058 #endif
10059         return ret;
10060     case TARGET_NR_setpriority:
10061         return get_errno(setpriority(arg1, arg2, arg3));
10062 #ifdef TARGET_NR_statfs
10063     case TARGET_NR_statfs:
10064         if (!(p = lock_user_string(arg1))) {
10065             return -TARGET_EFAULT;
10066         }
10067         ret = get_errno(statfs(path(p), &stfs));
10068         unlock_user(p, arg1, 0);
10069     convert_statfs:
10070         if (!is_error(ret)) {
10071             struct target_statfs *target_stfs;
10072 
10073             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10074                 return -TARGET_EFAULT;
10075             __put_user(stfs.f_type, &target_stfs->f_type);
10076             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10077             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10078             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10079             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10080             __put_user(stfs.f_files, &target_stfs->f_files);
10081             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10082             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10083             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10084             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10085             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10086 #ifdef _STATFS_F_FLAGS
10087             __put_user(stfs.f_flags, &target_stfs->f_flags);
10088 #else
10089             __put_user(0, &target_stfs->f_flags);
10090 #endif
10091             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10092             unlock_user_struct(target_stfs, arg2, 1);
10093         }
10094         return ret;
10095 #endif
10096 #ifdef TARGET_NR_fstatfs
10097     case TARGET_NR_fstatfs:
10098         ret = get_errno(fstatfs(arg1, &stfs));
10099         goto convert_statfs;
10100 #endif
10101 #ifdef TARGET_NR_statfs64
10102     case TARGET_NR_statfs64:
10103         if (!(p = lock_user_string(arg1))) {
10104             return -TARGET_EFAULT;
10105         }
10106         ret = get_errno(statfs(path(p), &stfs));
10107         unlock_user(p, arg1, 0);
10108     convert_statfs64:
10109         if (!is_error(ret)) {
10110             struct target_statfs64 *target_stfs;
10111 
10112             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10113                 return -TARGET_EFAULT;
10114             __put_user(stfs.f_type, &target_stfs->f_type);
10115             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10116             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10117             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10118             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10119             __put_user(stfs.f_files, &target_stfs->f_files);
10120             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10121             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10122             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10123             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10124             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10125 #ifdef _STATFS_F_FLAGS
10126             __put_user(stfs.f_flags, &target_stfs->f_flags);
10127 #else
10128             __put_user(0, &target_stfs->f_flags);
10129 #endif
10130             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10131             unlock_user_struct(target_stfs, arg3, 1);
10132         }
10133         return ret;
10134     case TARGET_NR_fstatfs64:
10135         ret = get_errno(fstatfs(arg1, &stfs));
10136         goto convert_statfs64;
10137 #endif
10138 #ifdef TARGET_NR_socketcall
10139     case TARGET_NR_socketcall:
10140         return do_socketcall(arg1, arg2);
10141 #endif
10142 #ifdef TARGET_NR_accept
10143     case TARGET_NR_accept:
10144         return do_accept4(arg1, arg2, arg3, 0);
10145 #endif
10146 #ifdef TARGET_NR_accept4
10147     case TARGET_NR_accept4:
10148         return do_accept4(arg1, arg2, arg3, arg4);
10149 #endif
10150 #ifdef TARGET_NR_bind
10151     case TARGET_NR_bind:
10152         return do_bind(arg1, arg2, arg3);
10153 #endif
10154 #ifdef TARGET_NR_connect
10155     case TARGET_NR_connect:
10156         return do_connect(arg1, arg2, arg3);
10157 #endif
10158 #ifdef TARGET_NR_getpeername
10159     case TARGET_NR_getpeername:
10160         return do_getpeername(arg1, arg2, arg3);
10161 #endif
10162 #ifdef TARGET_NR_getsockname
10163     case TARGET_NR_getsockname:
10164         return do_getsockname(arg1, arg2, arg3);
10165 #endif
10166 #ifdef TARGET_NR_getsockopt
10167     case TARGET_NR_getsockopt:
10168         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10169 #endif
10170 #ifdef TARGET_NR_listen
10171     case TARGET_NR_listen:
10172         return get_errno(listen(arg1, arg2));
10173 #endif
10174 #ifdef TARGET_NR_recv
10175     case TARGET_NR_recv:
10176         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10177 #endif
10178 #ifdef TARGET_NR_recvfrom
10179     case TARGET_NR_recvfrom:
10180         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10181 #endif
10182 #ifdef TARGET_NR_recvmsg
10183     case TARGET_NR_recvmsg:
10184         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10185 #endif
10186 #ifdef TARGET_NR_send
10187     case TARGET_NR_send:
10188         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10189 #endif
10190 #ifdef TARGET_NR_sendmsg
10191     case TARGET_NR_sendmsg:
10192         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10193 #endif
10194 #ifdef TARGET_NR_sendmmsg
10195     case TARGET_NR_sendmmsg:
10196         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10197 #endif
10198 #ifdef TARGET_NR_recvmmsg
10199     case TARGET_NR_recvmmsg:
10200         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10201 #endif
10202 #ifdef TARGET_NR_sendto
10203     case TARGET_NR_sendto:
10204         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10205 #endif
10206 #ifdef TARGET_NR_shutdown
10207     case TARGET_NR_shutdown:
10208         return get_errno(shutdown(arg1, arg2));
10209 #endif
10210 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10211     case TARGET_NR_getrandom:
10212         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10213         if (!p) {
10214             return -TARGET_EFAULT;
10215         }
10216         ret = get_errno(getrandom(p, arg2, arg3));
10217         unlock_user(p, arg1, ret);
10218         return ret;
10219 #endif
10220 #ifdef TARGET_NR_socket
10221     case TARGET_NR_socket:
10222         return do_socket(arg1, arg2, arg3);
10223 #endif
10224 #ifdef TARGET_NR_socketpair
10225     case TARGET_NR_socketpair:
10226         return do_socketpair(arg1, arg2, arg3, arg4);
10227 #endif
10228 #ifdef TARGET_NR_setsockopt
10229     case TARGET_NR_setsockopt:
10230         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10231 #endif
10232 #if defined(TARGET_NR_syslog)
10233     case TARGET_NR_syslog:
10234         {
10235             int len = arg2;
10236 
10237             switch (arg1) {
10238             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10239             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10240             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10241             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10242             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10243             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10244             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10245             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10246                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10247             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10248             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10249             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10250                 {
10251                     if (len < 0) {
10252                         return -TARGET_EINVAL;
10253                     }
10254                     if (len == 0) {
10255                         return 0;
10256                     }
10257                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10258                     if (!p) {
10259                         return -TARGET_EFAULT;
10260                     }
10261                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10262                     unlock_user(p, arg2, arg3);
10263                 }
10264                 return ret;
10265             default:
10266                 return -TARGET_EINVAL;
10267             }
10268         }
10269         break;
10270 #endif
10271     case TARGET_NR_setitimer:
10272         {
10273             struct itimerval value, ovalue, *pvalue;
10274 
10275             if (arg2) {
10276                 pvalue = &value;
10277                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10278                     || copy_from_user_timeval(&pvalue->it_value,
10279                                               arg2 + sizeof(struct target_timeval)))
10280                     return -TARGET_EFAULT;
10281             } else {
10282                 pvalue = NULL;
10283             }
10284             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10285             if (!is_error(ret) && arg3) {
10286                 if (copy_to_user_timeval(arg3,
10287                                          &ovalue.it_interval)
10288                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10289                                             &ovalue.it_value))
10290                     return -TARGET_EFAULT;
10291             }
10292         }
10293         return ret;
10294     case TARGET_NR_getitimer:
10295         {
10296             struct itimerval value;
10297 
10298             ret = get_errno(getitimer(arg1, &value));
10299             if (!is_error(ret) && arg2) {
10300                 if (copy_to_user_timeval(arg2,
10301                                          &value.it_interval)
10302                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10303                                             &value.it_value))
10304                     return -TARGET_EFAULT;
10305             }
10306         }
10307         return ret;
10308 #ifdef TARGET_NR_stat
10309     case TARGET_NR_stat:
10310         if (!(p = lock_user_string(arg1))) {
10311             return -TARGET_EFAULT;
10312         }
10313         ret = get_errno(stat(path(p), &st));
10314         unlock_user(p, arg1, 0);
10315         goto do_stat;
10316 #endif
10317 #ifdef TARGET_NR_lstat
10318     case TARGET_NR_lstat:
10319         if (!(p = lock_user_string(arg1))) {
10320             return -TARGET_EFAULT;
10321         }
10322         ret = get_errno(lstat(path(p), &st));
10323         unlock_user(p, arg1, 0);
10324         goto do_stat;
10325 #endif
10326 #ifdef TARGET_NR_fstat
10327     case TARGET_NR_fstat:
10328         {
10329             ret = get_errno(fstat(arg1, &st));
10330 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10331         do_stat:
10332 #endif
10333             if (!is_error(ret)) {
10334                 struct target_stat *target_st;
10335 
10336                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10337                     return -TARGET_EFAULT;
10338                 memset(target_st, 0, sizeof(*target_st));
10339                 __put_user(st.st_dev, &target_st->st_dev);
10340                 __put_user(st.st_ino, &target_st->st_ino);
10341                 __put_user(st.st_mode, &target_st->st_mode);
10342                 __put_user(st.st_uid, &target_st->st_uid);
10343                 __put_user(st.st_gid, &target_st->st_gid);
10344                 __put_user(st.st_nlink, &target_st->st_nlink);
10345                 __put_user(st.st_rdev, &target_st->st_rdev);
10346                 __put_user(st.st_size, &target_st->st_size);
10347                 __put_user(st.st_blksize, &target_st->st_blksize);
10348                 __put_user(st.st_blocks, &target_st->st_blocks);
10349                 __put_user(st.st_atime, &target_st->target_st_atime);
10350                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10351                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10352 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10353                 __put_user(st.st_atim.tv_nsec,
10354                            &target_st->target_st_atime_nsec);
10355                 __put_user(st.st_mtim.tv_nsec,
10356                            &target_st->target_st_mtime_nsec);
10357                 __put_user(st.st_ctim.tv_nsec,
10358                            &target_st->target_st_ctime_nsec);
10359 #endif
10360                 unlock_user_struct(target_st, arg2, 1);
10361             }
10362         }
10363         return ret;
10364 #endif
10365     case TARGET_NR_vhangup:
10366         return get_errno(vhangup());
10367 #ifdef TARGET_NR_syscall
10368     case TARGET_NR_syscall:
10369         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10370                           arg6, arg7, arg8, 0);
10371 #endif
10372 #if defined(TARGET_NR_wait4)
10373     case TARGET_NR_wait4:
10374         {
10375             int status;
10376             abi_long status_ptr = arg2;
10377             struct rusage rusage, *rusage_ptr;
10378             abi_ulong target_rusage = arg4;
10379             abi_long rusage_err;
10380             if (target_rusage)
10381                 rusage_ptr = &rusage;
10382             else
10383                 rusage_ptr = NULL;
10384             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10385             if (!is_error(ret)) {
10386                 if (status_ptr && ret) {
10387                     status = host_to_target_waitstatus(status);
10388                     if (put_user_s32(status, status_ptr))
10389                         return -TARGET_EFAULT;
10390                 }
10391                 if (target_rusage) {
10392                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10393                     if (rusage_err) {
10394                         ret = rusage_err;
10395                     }
10396                 }
10397             }
10398         }
10399         return ret;
10400 #endif
10401 #ifdef TARGET_NR_swapoff
10402     case TARGET_NR_swapoff:
10403         if (!(p = lock_user_string(arg1)))
10404             return -TARGET_EFAULT;
10405         ret = get_errno(swapoff(p));
10406         unlock_user(p, arg1, 0);
10407         return ret;
10408 #endif
10409     case TARGET_NR_sysinfo:
10410         {
10411             struct target_sysinfo *target_value;
10412             struct sysinfo value;
10413             ret = get_errno(sysinfo(&value));
10414             if (!is_error(ret) && arg1)
10415             {
10416                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10417                     return -TARGET_EFAULT;
10418                 __put_user(value.uptime, &target_value->uptime);
10419                 __put_user(value.loads[0], &target_value->loads[0]);
10420                 __put_user(value.loads[1], &target_value->loads[1]);
10421                 __put_user(value.loads[2], &target_value->loads[2]);
10422                 __put_user(value.totalram, &target_value->totalram);
10423                 __put_user(value.freeram, &target_value->freeram);
10424                 __put_user(value.sharedram, &target_value->sharedram);
10425                 __put_user(value.bufferram, &target_value->bufferram);
10426                 __put_user(value.totalswap, &target_value->totalswap);
10427                 __put_user(value.freeswap, &target_value->freeswap);
10428                 __put_user(value.procs, &target_value->procs);
10429                 __put_user(value.totalhigh, &target_value->totalhigh);
10430                 __put_user(value.freehigh, &target_value->freehigh);
10431                 __put_user(value.mem_unit, &target_value->mem_unit);
10432                 unlock_user_struct(target_value, arg1, 1);
10433             }
10434         }
10435         return ret;
10436 #ifdef TARGET_NR_ipc
10437     case TARGET_NR_ipc:
10438         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10439 #endif
10440 #ifdef TARGET_NR_semget
10441     case TARGET_NR_semget:
10442         return get_errno(semget(arg1, arg2, arg3));
10443 #endif
10444 #ifdef TARGET_NR_semop
10445     case TARGET_NR_semop:
10446         return do_semtimedop(arg1, arg2, arg3, 0, false);
10447 #endif
10448 #ifdef TARGET_NR_semtimedop
10449     case TARGET_NR_semtimedop:
10450         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10451 #endif
10452 #ifdef TARGET_NR_semtimedop_time64
10453     case TARGET_NR_semtimedop_time64:
10454         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10455 #endif
10456 #ifdef TARGET_NR_semctl
10457     case TARGET_NR_semctl:
10458         return do_semctl(arg1, arg2, arg3, arg4);
10459 #endif
10460 #ifdef TARGET_NR_msgctl
10461     case TARGET_NR_msgctl:
10462         return do_msgctl(arg1, arg2, arg3);
10463 #endif
10464 #ifdef TARGET_NR_msgget
10465     case TARGET_NR_msgget:
10466         return get_errno(msgget(arg1, arg2));
10467 #endif
10468 #ifdef TARGET_NR_msgrcv
10469     case TARGET_NR_msgrcv:
10470         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10471 #endif
10472 #ifdef TARGET_NR_msgsnd
10473     case TARGET_NR_msgsnd:
10474         return do_msgsnd(arg1, arg2, arg3, arg4);
10475 #endif
10476 #ifdef TARGET_NR_shmget
10477     case TARGET_NR_shmget:
10478         return get_errno(shmget(arg1, arg2, arg3));
10479 #endif
10480 #ifdef TARGET_NR_shmctl
10481     case TARGET_NR_shmctl:
10482         return do_shmctl(arg1, arg2, arg3);
10483 #endif
10484 #ifdef TARGET_NR_shmat
10485     case TARGET_NR_shmat:
10486         return do_shmat(cpu_env, arg1, arg2, arg3);
10487 #endif
10488 #ifdef TARGET_NR_shmdt
10489     case TARGET_NR_shmdt:
10490         return do_shmdt(arg1);
10491 #endif
10492     case TARGET_NR_fsync:
10493         return get_errno(fsync(arg1));
10494     case TARGET_NR_clone:
10495         /* Linux manages to have three different orderings for its
10496          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10497          * match the kernel's CONFIG_CLONE_* settings.
10498          * Microblaze is further special in that it uses a sixth
10499          * implicit argument to clone for the TLS pointer.
10500          */
10501 #if defined(TARGET_MICROBLAZE)
10502         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10503 #elif defined(TARGET_CLONE_BACKWARDS)
10504         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10505 #elif defined(TARGET_CLONE_BACKWARDS2)
10506         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10507 #else
10508         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10509 #endif
10510         return ret;
10511 #ifdef __NR_exit_group
10512         /* new thread calls */
10513     case TARGET_NR_exit_group:
10514         preexit_cleanup(cpu_env, arg1);
10515         return get_errno(exit_group(arg1));
10516 #endif
10517     case TARGET_NR_setdomainname:
10518         if (!(p = lock_user_string(arg1)))
10519             return -TARGET_EFAULT;
10520         ret = get_errno(setdomainname(p, arg2));
10521         unlock_user(p, arg1, 0);
10522         return ret;
10523     case TARGET_NR_uname:
10524         /* no need to transcode because we use the linux syscall */
10525         {
10526             struct new_utsname * buf;
10527 
10528             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10529                 return -TARGET_EFAULT;
10530             ret = get_errno(sys_uname(buf));
10531             if (!is_error(ret)) {
10532                 /* Overwrite the native machine name with whatever is being
10533                    emulated. */
10534                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10535                           sizeof(buf->machine));
10536                 /* Allow the user to override the reported release.  */
10537                 if (qemu_uname_release && *qemu_uname_release) {
10538                     g_strlcpy(buf->release, qemu_uname_release,
10539                               sizeof(buf->release));
10540                 }
10541             }
10542             unlock_user_struct(buf, arg1, 1);
10543         }
10544         return ret;
10545 #ifdef TARGET_I386
10546     case TARGET_NR_modify_ldt:
10547         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10548 #if !defined(TARGET_X86_64)
10549     case TARGET_NR_vm86:
10550         return do_vm86(cpu_env, arg1, arg2);
10551 #endif
10552 #endif
10553 #if defined(TARGET_NR_adjtimex)
10554     case TARGET_NR_adjtimex:
10555         {
10556             struct timex host_buf;
10557 
10558             if (target_to_host_timex(&host_buf, arg1) != 0) {
10559                 return -TARGET_EFAULT;
10560             }
10561             ret = get_errno(adjtimex(&host_buf));
10562             if (!is_error(ret)) {
10563                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10564                     return -TARGET_EFAULT;
10565                 }
10566             }
10567         }
10568         return ret;
10569 #endif
10570 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10571     case TARGET_NR_clock_adjtime:
10572         {
10573             struct timex htx, *phtx = &htx;
10574 
10575             if (target_to_host_timex(phtx, arg2) != 0) {
10576                 return -TARGET_EFAULT;
10577             }
10578             ret = get_errno(clock_adjtime(arg1, phtx));
10579             if (!is_error(ret) && phtx) {
10580                 if (host_to_target_timex(arg2, phtx) != 0) {
10581                     return -TARGET_EFAULT;
10582                 }
10583             }
10584         }
10585         return ret;
10586 #endif
10587 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10588     case TARGET_NR_clock_adjtime64:
10589         {
10590             struct timex htx;
10591 
10592             if (target_to_host_timex64(&htx, arg2) != 0) {
10593                 return -TARGET_EFAULT;
10594             }
10595             ret = get_errno(clock_adjtime(arg1, &htx));
10596             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10597                     return -TARGET_EFAULT;
10598             }
10599         }
10600         return ret;
10601 #endif
10602     case TARGET_NR_getpgid:
10603         return get_errno(getpgid(arg1));
10604     case TARGET_NR_fchdir:
10605         return get_errno(fchdir(arg1));
10606     case TARGET_NR_personality:
10607         return get_errno(personality(arg1));
10608 #ifdef TARGET_NR__llseek /* Not on alpha */
10609     case TARGET_NR__llseek:
10610         {
10611             int64_t res;
10612 #if !defined(__NR_llseek)
10613             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10614             if (res == -1) {
10615                 ret = get_errno(res);
10616             } else {
10617                 ret = 0;
10618             }
10619 #else
10620             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10621 #endif
10622             if ((ret == 0) && put_user_s64(res, arg4)) {
10623                 return -TARGET_EFAULT;
10624             }
10625         }
10626         return ret;
10627 #endif
10628 #ifdef TARGET_NR_getdents
10629     case TARGET_NR_getdents:
10630         return do_getdents(arg1, arg2, arg3);
10631 #endif /* TARGET_NR_getdents */
10632 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10633     case TARGET_NR_getdents64:
10634         return do_getdents64(arg1, arg2, arg3);
10635 #endif /* TARGET_NR_getdents64 */
10636 #if defined(TARGET_NR__newselect)
10637     case TARGET_NR__newselect:
10638         return do_select(arg1, arg2, arg3, arg4, arg5);
10639 #endif
10640 #ifdef TARGET_NR_poll
10641     case TARGET_NR_poll:
10642         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10643 #endif
10644 #ifdef TARGET_NR_ppoll
10645     case TARGET_NR_ppoll:
10646         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10647 #endif
10648 #ifdef TARGET_NR_ppoll_time64
10649     case TARGET_NR_ppoll_time64:
10650         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10651 #endif
10652     case TARGET_NR_flock:
10653         /* NOTE: the flock constant seems to be the same for every
10654            Linux platform */
10655         return get_errno(safe_flock(arg1, arg2));
10656     case TARGET_NR_readv:
10657         {
10658             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10659             if (vec != NULL) {
10660                 ret = get_errno(safe_readv(arg1, vec, arg3));
10661                 unlock_iovec(vec, arg2, arg3, 1);
10662             } else {
10663                 ret = -host_to_target_errno(errno);
10664             }
10665         }
10666         return ret;
10667     case TARGET_NR_writev:
10668         {
10669             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10670             if (vec != NULL) {
10671                 ret = get_errno(safe_writev(arg1, vec, arg3));
10672                 unlock_iovec(vec, arg2, arg3, 0);
10673             } else {
10674                 ret = -host_to_target_errno(errno);
10675             }
10676         }
10677         return ret;
10678 #if defined(TARGET_NR_preadv)
10679     case TARGET_NR_preadv:
10680         {
10681             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10682             if (vec != NULL) {
10683                 unsigned long low, high;
10684 
10685                 target_to_host_low_high(arg4, arg5, &low, &high);
10686                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10687                 unlock_iovec(vec, arg2, arg3, 1);
10688             } else {
10689                 ret = -host_to_target_errno(errno);
10690            }
10691         }
10692         return ret;
10693 #endif
10694 #if defined(TARGET_NR_pwritev)
10695     case TARGET_NR_pwritev:
10696         {
10697             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10698             if (vec != NULL) {
10699                 unsigned long low, high;
10700 
10701                 target_to_host_low_high(arg4, arg5, &low, &high);
10702                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10703                 unlock_iovec(vec, arg2, arg3, 0);
10704             } else {
10705                 ret = -host_to_target_errno(errno);
10706            }
10707         }
10708         return ret;
10709 #endif
10710     case TARGET_NR_getsid:
10711         return get_errno(getsid(arg1));
10712 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10713     case TARGET_NR_fdatasync:
10714         return get_errno(fdatasync(arg1));
10715 #endif
10716     case TARGET_NR_sched_getaffinity:
10717         {
10718             unsigned int mask_size;
10719             unsigned long *mask;
10720 
10721             /*
10722              * sched_getaffinity needs multiples of ulong, so need to take
10723              * care of mismatches between target ulong and host ulong sizes.
10724              */
10725             if (arg2 & (sizeof(abi_ulong) - 1)) {
10726                 return -TARGET_EINVAL;
10727             }
10728             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10729 
10730             mask = alloca(mask_size);
10731             memset(mask, 0, mask_size);
10732             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10733 
10734             if (!is_error(ret)) {
10735                 if (ret > arg2) {
10736                     /* More data returned than the caller's buffer will fit.
10737                      * This only happens if sizeof(abi_long) < sizeof(long)
10738                      * and the caller passed us a buffer holding an odd number
10739                      * of abi_longs. If the host kernel is actually using the
10740                      * extra 4 bytes then fail EINVAL; otherwise we can just
10741                      * ignore them and only copy the interesting part.
10742                      */
10743                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10744                     if (numcpus > arg2 * 8) {
10745                         return -TARGET_EINVAL;
10746                     }
10747                     ret = arg2;
10748                 }
10749 
10750                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10751                     return -TARGET_EFAULT;
10752                 }
10753             }
10754         }
10755         return ret;
10756     case TARGET_NR_sched_setaffinity:
10757         {
10758             unsigned int mask_size;
10759             unsigned long *mask;
10760 
10761             /*
10762              * sched_setaffinity needs multiples of ulong, so need to take
10763              * care of mismatches between target ulong and host ulong sizes.
10764              */
10765             if (arg2 & (sizeof(abi_ulong) - 1)) {
10766                 return -TARGET_EINVAL;
10767             }
10768             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10769             mask = alloca(mask_size);
10770 
10771             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10772             if (ret) {
10773                 return ret;
10774             }
10775 
10776             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10777         }
10778     case TARGET_NR_getcpu:
10779         {
10780             unsigned cpu, node;
10781             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10782                                        arg2 ? &node : NULL,
10783                                        NULL));
10784             if (is_error(ret)) {
10785                 return ret;
10786             }
10787             if (arg1 && put_user_u32(cpu, arg1)) {
10788                 return -TARGET_EFAULT;
10789             }
10790             if (arg2 && put_user_u32(node, arg2)) {
10791                 return -TARGET_EFAULT;
10792             }
10793         }
10794         return ret;
10795     case TARGET_NR_sched_setparam:
10796         {
10797             struct sched_param *target_schp;
10798             struct sched_param schp;
10799 
10800             if (arg2 == 0) {
10801                 return -TARGET_EINVAL;
10802             }
10803             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10804                 return -TARGET_EFAULT;
10805             schp.sched_priority = tswap32(target_schp->sched_priority);
10806             unlock_user_struct(target_schp, arg2, 0);
10807             return get_errno(sched_setparam(arg1, &schp));
10808         }
10809     case TARGET_NR_sched_getparam:
10810         {
10811             struct sched_param *target_schp;
10812             struct sched_param schp;
10813 
10814             if (arg2 == 0) {
10815                 return -TARGET_EINVAL;
10816             }
10817             ret = get_errno(sched_getparam(arg1, &schp));
10818             if (!is_error(ret)) {
10819                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10820                     return -TARGET_EFAULT;
10821                 target_schp->sched_priority = tswap32(schp.sched_priority);
10822                 unlock_user_struct(target_schp, arg2, 1);
10823             }
10824         }
10825         return ret;
10826     case TARGET_NR_sched_setscheduler:
10827         {
10828             struct sched_param *target_schp;
10829             struct sched_param schp;
10830             if (arg3 == 0) {
10831                 return -TARGET_EINVAL;
10832             }
10833             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10834                 return -TARGET_EFAULT;
10835             schp.sched_priority = tswap32(target_schp->sched_priority);
10836             unlock_user_struct(target_schp, arg3, 0);
10837             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10838         }
10839     case TARGET_NR_sched_getscheduler:
10840         return get_errno(sched_getscheduler(arg1));
10841     case TARGET_NR_sched_getattr:
10842         {
10843             struct target_sched_attr *target_scha;
10844             struct sched_attr scha;
10845             if (arg2 == 0) {
10846                 return -TARGET_EINVAL;
10847             }
10848             if (arg3 > sizeof(scha)) {
10849                 arg3 = sizeof(scha);
10850             }
10851             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10852             if (!is_error(ret)) {
10853                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10854                 if (!target_scha) {
10855                     return -TARGET_EFAULT;
10856                 }
10857                 target_scha->size = tswap32(scha.size);
10858                 target_scha->sched_policy = tswap32(scha.sched_policy);
10859                 target_scha->sched_flags = tswap64(scha.sched_flags);
10860                 target_scha->sched_nice = tswap32(scha.sched_nice);
10861                 target_scha->sched_priority = tswap32(scha.sched_priority);
10862                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10863                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10864                 target_scha->sched_period = tswap64(scha.sched_period);
10865                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10866                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10867                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10868                 }
10869                 unlock_user(target_scha, arg2, arg3);
10870             }
10871             return ret;
10872         }
10873     case TARGET_NR_sched_setattr:
10874         {
10875             struct target_sched_attr *target_scha;
10876             struct sched_attr scha;
10877             uint32_t size;
10878             int zeroed;
10879             if (arg2 == 0) {
10880                 return -TARGET_EINVAL;
10881             }
10882             if (get_user_u32(size, arg2)) {
10883                 return -TARGET_EFAULT;
10884             }
10885             if (!size) {
10886                 size = offsetof(struct target_sched_attr, sched_util_min);
10887             }
10888             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10889                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10890                     return -TARGET_EFAULT;
10891                 }
10892                 return -TARGET_E2BIG;
10893             }
10894 
10895             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10896             if (zeroed < 0) {
10897                 return zeroed;
10898             } else if (zeroed == 0) {
10899                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10900                     return -TARGET_EFAULT;
10901                 }
10902                 return -TARGET_E2BIG;
10903             }
10904             if (size > sizeof(struct target_sched_attr)) {
10905                 size = sizeof(struct target_sched_attr);
10906             }
10907 
10908             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10909             if (!target_scha) {
10910                 return -TARGET_EFAULT;
10911             }
10912             scha.size = size;
10913             scha.sched_policy = tswap32(target_scha->sched_policy);
10914             scha.sched_flags = tswap64(target_scha->sched_flags);
10915             scha.sched_nice = tswap32(target_scha->sched_nice);
10916             scha.sched_priority = tswap32(target_scha->sched_priority);
10917             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10918             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10919             scha.sched_period = tswap64(target_scha->sched_period);
10920             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10921                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10922                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10923             }
10924             unlock_user(target_scha, arg2, 0);
10925             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10926         }
10927     case TARGET_NR_sched_yield:
10928         return get_errno(sched_yield());
10929     case TARGET_NR_sched_get_priority_max:
10930         return get_errno(sched_get_priority_max(arg1));
10931     case TARGET_NR_sched_get_priority_min:
10932         return get_errno(sched_get_priority_min(arg1));
10933 #ifdef TARGET_NR_sched_rr_get_interval
10934     case TARGET_NR_sched_rr_get_interval:
10935         {
10936             struct timespec ts;
10937             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10938             if (!is_error(ret)) {
10939                 ret = host_to_target_timespec(arg2, &ts);
10940             }
10941         }
10942         return ret;
10943 #endif
10944 #ifdef TARGET_NR_sched_rr_get_interval_time64
10945     case TARGET_NR_sched_rr_get_interval_time64:
10946         {
10947             struct timespec ts;
10948             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10949             if (!is_error(ret)) {
10950                 ret = host_to_target_timespec64(arg2, &ts);
10951             }
10952         }
10953         return ret;
10954 #endif
10955 #if defined(TARGET_NR_nanosleep)
10956     case TARGET_NR_nanosleep:
10957         {
10958             struct timespec req, rem;
10959             target_to_host_timespec(&req, arg1);
10960             ret = get_errno(safe_nanosleep(&req, &rem));
10961             if (is_error(ret) && arg2) {
10962                 host_to_target_timespec(arg2, &rem);
10963             }
10964         }
10965         return ret;
10966 #endif
10967     case TARGET_NR_prctl:
10968         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10969         break;
10970 #ifdef TARGET_NR_arch_prctl
10971     case TARGET_NR_arch_prctl:
10972         return do_arch_prctl(cpu_env, arg1, arg2);
10973 #endif
10974 #ifdef TARGET_NR_pread64
10975     case TARGET_NR_pread64:
10976         if (regpairs_aligned(cpu_env, num)) {
10977             arg4 = arg5;
10978             arg5 = arg6;
10979         }
10980         if (arg2 == 0 && arg3 == 0) {
10981             /* Special-case NULL buffer and zero length, which should succeed */
10982             p = 0;
10983         } else {
10984             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10985             if (!p) {
10986                 return -TARGET_EFAULT;
10987             }
10988         }
10989         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10990         unlock_user(p, arg2, ret);
10991         return ret;
10992     case TARGET_NR_pwrite64:
10993         if (regpairs_aligned(cpu_env, num)) {
10994             arg4 = arg5;
10995             arg5 = arg6;
10996         }
10997         if (arg2 == 0 && arg3 == 0) {
10998             /* Special-case NULL buffer and zero length, which should succeed */
10999             p = 0;
11000         } else {
11001             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11002             if (!p) {
11003                 return -TARGET_EFAULT;
11004             }
11005         }
11006         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11007         unlock_user(p, arg2, 0);
11008         return ret;
11009 #endif
11010     case TARGET_NR_getcwd:
11011         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11012             return -TARGET_EFAULT;
11013         ret = get_errno(sys_getcwd1(p, arg2));
11014         unlock_user(p, arg1, ret);
11015         return ret;
11016     case TARGET_NR_capget:
11017     case TARGET_NR_capset:
11018     {
11019         struct target_user_cap_header *target_header;
11020         struct target_user_cap_data *target_data = NULL;
11021         struct __user_cap_header_struct header;
11022         struct __user_cap_data_struct data[2];
11023         struct __user_cap_data_struct *dataptr = NULL;
11024         int i, target_datalen;
11025         int data_items = 1;
11026 
11027         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11028             return -TARGET_EFAULT;
11029         }
11030         header.version = tswap32(target_header->version);
11031         header.pid = tswap32(target_header->pid);
11032 
11033         if (header.version != _LINUX_CAPABILITY_VERSION) {
11034             /* Version 2 and up takes pointer to two user_data structs */
11035             data_items = 2;
11036         }
11037 
11038         target_datalen = sizeof(*target_data) * data_items;
11039 
11040         if (arg2) {
11041             if (num == TARGET_NR_capget) {
11042                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11043             } else {
11044                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11045             }
11046             if (!target_data) {
11047                 unlock_user_struct(target_header, arg1, 0);
11048                 return -TARGET_EFAULT;
11049             }
11050 
11051             if (num == TARGET_NR_capset) {
11052                 for (i = 0; i < data_items; i++) {
11053                     data[i].effective = tswap32(target_data[i].effective);
11054                     data[i].permitted = tswap32(target_data[i].permitted);
11055                     data[i].inheritable = tswap32(target_data[i].inheritable);
11056                 }
11057             }
11058 
11059             dataptr = data;
11060         }
11061 
11062         if (num == TARGET_NR_capget) {
11063             ret = get_errno(capget(&header, dataptr));
11064         } else {
11065             ret = get_errno(capset(&header, dataptr));
11066         }
11067 
11068         /* The kernel always updates version for both capget and capset */
11069         target_header->version = tswap32(header.version);
11070         unlock_user_struct(target_header, arg1, 1);
11071 
11072         if (arg2) {
11073             if (num == TARGET_NR_capget) {
11074                 for (i = 0; i < data_items; i++) {
11075                     target_data[i].effective = tswap32(data[i].effective);
11076                     target_data[i].permitted = tswap32(data[i].permitted);
11077                     target_data[i].inheritable = tswap32(data[i].inheritable);
11078                 }
11079                 unlock_user(target_data, arg2, target_datalen);
11080             } else {
11081                 unlock_user(target_data, arg2, 0);
11082             }
11083         }
11084         return ret;
11085     }
11086     case TARGET_NR_sigaltstack:
11087         return do_sigaltstack(arg1, arg2, cpu_env);
11088 
11089 #ifdef CONFIG_SENDFILE
11090 #ifdef TARGET_NR_sendfile
11091     case TARGET_NR_sendfile:
11092     {
11093         off_t *offp = NULL;
11094         off_t off;
11095         if (arg3) {
11096             ret = get_user_sal(off, arg3);
11097             if (is_error(ret)) {
11098                 return ret;
11099             }
11100             offp = &off;
11101         }
11102         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11103         if (!is_error(ret) && arg3) {
11104             abi_long ret2 = put_user_sal(off, arg3);
11105             if (is_error(ret2)) {
11106                 ret = ret2;
11107             }
11108         }
11109         return ret;
11110     }
11111 #endif
11112 #ifdef TARGET_NR_sendfile64
11113     case TARGET_NR_sendfile64:
11114     {
11115         off_t *offp = NULL;
11116         off_t off;
11117         if (arg3) {
11118             ret = get_user_s64(off, arg3);
11119             if (is_error(ret)) {
11120                 return ret;
11121             }
11122             offp = &off;
11123         }
11124         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11125         if (!is_error(ret) && arg3) {
11126             abi_long ret2 = put_user_s64(off, arg3);
11127             if (is_error(ret2)) {
11128                 ret = ret2;
11129             }
11130         }
11131         return ret;
11132     }
11133 #endif
11134 #endif
11135 #ifdef TARGET_NR_vfork
11136     case TARGET_NR_vfork:
11137         return get_errno(do_fork(cpu_env,
11138                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11139                          0, 0, 0, 0));
11140 #endif
11141 #ifdef TARGET_NR_ugetrlimit
11142     case TARGET_NR_ugetrlimit:
11143     {
11144 	struct rlimit rlim;
11145 	int resource = target_to_host_resource(arg1);
11146 	ret = get_errno(getrlimit(resource, &rlim));
11147 	if (!is_error(ret)) {
11148 	    struct target_rlimit *target_rlim;
11149             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11150                 return -TARGET_EFAULT;
11151 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11152 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11153             unlock_user_struct(target_rlim, arg2, 1);
11154 	}
11155         return ret;
11156     }
11157 #endif
11158 #ifdef TARGET_NR_truncate64
11159     case TARGET_NR_truncate64:
11160         if (!(p = lock_user_string(arg1)))
11161             return -TARGET_EFAULT;
11162 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11163         unlock_user(p, arg1, 0);
11164         return ret;
11165 #endif
11166 #ifdef TARGET_NR_ftruncate64
11167     case TARGET_NR_ftruncate64:
11168         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11169 #endif
11170 #ifdef TARGET_NR_stat64
11171     case TARGET_NR_stat64:
11172         if (!(p = lock_user_string(arg1))) {
11173             return -TARGET_EFAULT;
11174         }
11175         ret = get_errno(stat(path(p), &st));
11176         unlock_user(p, arg1, 0);
11177         if (!is_error(ret))
11178             ret = host_to_target_stat64(cpu_env, arg2, &st);
11179         return ret;
11180 #endif
11181 #ifdef TARGET_NR_lstat64
11182     case TARGET_NR_lstat64:
11183         if (!(p = lock_user_string(arg1))) {
11184             return -TARGET_EFAULT;
11185         }
11186         ret = get_errno(lstat(path(p), &st));
11187         unlock_user(p, arg1, 0);
11188         if (!is_error(ret))
11189             ret = host_to_target_stat64(cpu_env, arg2, &st);
11190         return ret;
11191 #endif
11192 #ifdef TARGET_NR_fstat64
11193     case TARGET_NR_fstat64:
11194         ret = get_errno(fstat(arg1, &st));
11195         if (!is_error(ret))
11196             ret = host_to_target_stat64(cpu_env, arg2, &st);
11197         return ret;
11198 #endif
11199 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11200 #ifdef TARGET_NR_fstatat64
11201     case TARGET_NR_fstatat64:
11202 #endif
11203 #ifdef TARGET_NR_newfstatat
11204     case TARGET_NR_newfstatat:
11205 #endif
11206         if (!(p = lock_user_string(arg2))) {
11207             return -TARGET_EFAULT;
11208         }
11209         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11210         unlock_user(p, arg2, 0);
11211         if (!is_error(ret))
11212             ret = host_to_target_stat64(cpu_env, arg3, &st);
11213         return ret;
11214 #endif
11215 #if defined(TARGET_NR_statx)
11216     case TARGET_NR_statx:
11217         {
11218             struct target_statx *target_stx;
11219             int dirfd = arg1;
11220             int flags = arg3;
11221 
11222             p = lock_user_string(arg2);
11223             if (p == NULL) {
11224                 return -TARGET_EFAULT;
11225             }
11226 #if defined(__NR_statx)
11227             {
11228                 /*
11229                  * It is assumed that struct statx is architecture independent.
11230                  */
11231                 struct target_statx host_stx;
11232                 int mask = arg4;
11233 
11234                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11235                 if (!is_error(ret)) {
11236                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11237                         unlock_user(p, arg2, 0);
11238                         return -TARGET_EFAULT;
11239                     }
11240                 }
11241 
11242                 if (ret != -TARGET_ENOSYS) {
11243                     unlock_user(p, arg2, 0);
11244                     return ret;
11245                 }
11246             }
11247 #endif
11248             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11249             unlock_user(p, arg2, 0);
11250 
11251             if (!is_error(ret)) {
11252                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11253                     return -TARGET_EFAULT;
11254                 }
11255                 memset(target_stx, 0, sizeof(*target_stx));
11256                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11257                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11258                 __put_user(st.st_ino, &target_stx->stx_ino);
11259                 __put_user(st.st_mode, &target_stx->stx_mode);
11260                 __put_user(st.st_uid, &target_stx->stx_uid);
11261                 __put_user(st.st_gid, &target_stx->stx_gid);
11262                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11263                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11264                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11265                 __put_user(st.st_size, &target_stx->stx_size);
11266                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11267                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11268                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11269                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11270                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11271                 unlock_user_struct(target_stx, arg5, 1);
11272             }
11273         }
11274         return ret;
11275 #endif
11276 #ifdef TARGET_NR_lchown
11277     case TARGET_NR_lchown:
11278         if (!(p = lock_user_string(arg1)))
11279             return -TARGET_EFAULT;
11280         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11281         unlock_user(p, arg1, 0);
11282         return ret;
11283 #endif
11284 #ifdef TARGET_NR_getuid
11285     case TARGET_NR_getuid:
11286         return get_errno(high2lowuid(getuid()));
11287 #endif
11288 #ifdef TARGET_NR_getgid
11289     case TARGET_NR_getgid:
11290         return get_errno(high2lowgid(getgid()));
11291 #endif
11292 #ifdef TARGET_NR_geteuid
11293     case TARGET_NR_geteuid:
11294         return get_errno(high2lowuid(geteuid()));
11295 #endif
11296 #ifdef TARGET_NR_getegid
11297     case TARGET_NR_getegid:
11298         return get_errno(high2lowgid(getegid()));
11299 #endif
11300     case TARGET_NR_setreuid:
11301         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11302     case TARGET_NR_setregid:
11303         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11304     case TARGET_NR_getgroups:
11305         {
11306             int gidsetsize = arg1;
11307             target_id *target_grouplist;
11308             gid_t *grouplist;
11309             int i;
11310 
11311             grouplist = alloca(gidsetsize * sizeof(gid_t));
11312             ret = get_errno(getgroups(gidsetsize, grouplist));
11313             if (gidsetsize == 0)
11314                 return ret;
11315             if (!is_error(ret)) {
11316                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11317                 if (!target_grouplist)
11318                     return -TARGET_EFAULT;
11319                 for(i = 0;i < ret; i++)
11320                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11321                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11322             }
11323         }
11324         return ret;
11325     case TARGET_NR_setgroups:
11326         {
11327             int gidsetsize = arg1;
11328             target_id *target_grouplist;
11329             gid_t *grouplist = NULL;
11330             int i;
11331             if (gidsetsize) {
11332                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11333                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11334                 if (!target_grouplist) {
11335                     return -TARGET_EFAULT;
11336                 }
11337                 for (i = 0; i < gidsetsize; i++) {
11338                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11339                 }
11340                 unlock_user(target_grouplist, arg2, 0);
11341             }
11342             return get_errno(setgroups(gidsetsize, grouplist));
11343         }
11344     case TARGET_NR_fchown:
11345         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11346 #if defined(TARGET_NR_fchownat)
11347     case TARGET_NR_fchownat:
11348         if (!(p = lock_user_string(arg2)))
11349             return -TARGET_EFAULT;
11350         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11351                                  low2highgid(arg4), arg5));
11352         unlock_user(p, arg2, 0);
11353         return ret;
11354 #endif
11355 #ifdef TARGET_NR_setresuid
11356     case TARGET_NR_setresuid:
11357         return get_errno(sys_setresuid(low2highuid(arg1),
11358                                        low2highuid(arg2),
11359                                        low2highuid(arg3)));
11360 #endif
11361 #ifdef TARGET_NR_getresuid
11362     case TARGET_NR_getresuid:
11363         {
11364             uid_t ruid, euid, suid;
11365             ret = get_errno(getresuid(&ruid, &euid, &suid));
11366             if (!is_error(ret)) {
11367                 if (put_user_id(high2lowuid(ruid), arg1)
11368                     || put_user_id(high2lowuid(euid), arg2)
11369                     || put_user_id(high2lowuid(suid), arg3))
11370                     return -TARGET_EFAULT;
11371             }
11372         }
11373         return ret;
11374 #endif
11375 #ifdef TARGET_NR_getresgid
11376     case TARGET_NR_setresgid:
11377         return get_errno(sys_setresgid(low2highgid(arg1),
11378                                        low2highgid(arg2),
11379                                        low2highgid(arg3)));
11380 #endif
11381 #ifdef TARGET_NR_getresgid
11382     case TARGET_NR_getresgid:
11383         {
11384             gid_t rgid, egid, sgid;
11385             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11386             if (!is_error(ret)) {
11387                 if (put_user_id(high2lowgid(rgid), arg1)
11388                     || put_user_id(high2lowgid(egid), arg2)
11389                     || put_user_id(high2lowgid(sgid), arg3))
11390                     return -TARGET_EFAULT;
11391             }
11392         }
11393         return ret;
11394 #endif
11395 #ifdef TARGET_NR_chown
11396     case TARGET_NR_chown:
11397         if (!(p = lock_user_string(arg1)))
11398             return -TARGET_EFAULT;
11399         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11400         unlock_user(p, arg1, 0);
11401         return ret;
11402 #endif
11403     case TARGET_NR_setuid:
11404         return get_errno(sys_setuid(low2highuid(arg1)));
11405     case TARGET_NR_setgid:
11406         return get_errno(sys_setgid(low2highgid(arg1)));
11407     case TARGET_NR_setfsuid:
11408         return get_errno(setfsuid(arg1));
11409     case TARGET_NR_setfsgid:
11410         return get_errno(setfsgid(arg1));
11411 
11412 #ifdef TARGET_NR_lchown32
11413     case TARGET_NR_lchown32:
11414         if (!(p = lock_user_string(arg1)))
11415             return -TARGET_EFAULT;
11416         ret = get_errno(lchown(p, arg2, arg3));
11417         unlock_user(p, arg1, 0);
11418         return ret;
11419 #endif
11420 #ifdef TARGET_NR_getuid32
11421     case TARGET_NR_getuid32:
11422         return get_errno(getuid());
11423 #endif
11424 
11425 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11426    /* Alpha specific */
11427     case TARGET_NR_getxuid:
11428          {
11429             uid_t euid;
11430             euid=geteuid();
11431             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11432          }
11433         return get_errno(getuid());
11434 #endif
11435 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11436    /* Alpha specific */
11437     case TARGET_NR_getxgid:
11438          {
11439             uid_t egid;
11440             egid=getegid();
11441             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11442          }
11443         return get_errno(getgid());
11444 #endif
11445 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11446     /* Alpha specific */
11447     case TARGET_NR_osf_getsysinfo:
11448         ret = -TARGET_EOPNOTSUPP;
11449         switch (arg1) {
11450           case TARGET_GSI_IEEE_FP_CONTROL:
11451             {
11452                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11453                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11454 
11455                 swcr &= ~SWCR_STATUS_MASK;
11456                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11457 
11458                 if (put_user_u64 (swcr, arg2))
11459                         return -TARGET_EFAULT;
11460                 ret = 0;
11461             }
11462             break;
11463 
11464           /* case GSI_IEEE_STATE_AT_SIGNAL:
11465              -- Not implemented in linux kernel.
11466              case GSI_UACPROC:
11467              -- Retrieves current unaligned access state; not much used.
11468              case GSI_PROC_TYPE:
11469              -- Retrieves implver information; surely not used.
11470              case GSI_GET_HWRPB:
11471              -- Grabs a copy of the HWRPB; surely not used.
11472           */
11473         }
11474         return ret;
11475 #endif
11476 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11477     /* Alpha specific */
11478     case TARGET_NR_osf_setsysinfo:
11479         ret = -TARGET_EOPNOTSUPP;
11480         switch (arg1) {
11481           case TARGET_SSI_IEEE_FP_CONTROL:
11482             {
11483                 uint64_t swcr, fpcr;
11484 
11485                 if (get_user_u64 (swcr, arg2)) {
11486                     return -TARGET_EFAULT;
11487                 }
11488 
11489                 /*
11490                  * The kernel calls swcr_update_status to update the
11491                  * status bits from the fpcr at every point that it
11492                  * could be queried.  Therefore, we store the status
11493                  * bits only in FPCR.
11494                  */
11495                 ((CPUAlphaState *)cpu_env)->swcr
11496                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11497 
11498                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11499                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11500                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11501                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11502                 ret = 0;
11503             }
11504             break;
11505 
11506           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11507             {
11508                 uint64_t exc, fpcr, fex;
11509 
11510                 if (get_user_u64(exc, arg2)) {
11511                     return -TARGET_EFAULT;
11512                 }
11513                 exc &= SWCR_STATUS_MASK;
11514                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11515 
11516                 /* Old exceptions are not signaled.  */
11517                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11518                 fex = exc & ~fex;
11519                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11520                 fex &= ((CPUArchState *)cpu_env)->swcr;
11521 
11522                 /* Update the hardware fpcr.  */
11523                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11524                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11525 
11526                 if (fex) {
11527                     int si_code = TARGET_FPE_FLTUNK;
11528                     target_siginfo_t info;
11529 
11530                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11531                         si_code = TARGET_FPE_FLTUND;
11532                     }
11533                     if (fex & SWCR_TRAP_ENABLE_INE) {
11534                         si_code = TARGET_FPE_FLTRES;
11535                     }
11536                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11537                         si_code = TARGET_FPE_FLTUND;
11538                     }
11539                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11540                         si_code = TARGET_FPE_FLTOVF;
11541                     }
11542                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11543                         si_code = TARGET_FPE_FLTDIV;
11544                     }
11545                     if (fex & SWCR_TRAP_ENABLE_INV) {
11546                         si_code = TARGET_FPE_FLTINV;
11547                     }
11548 
11549                     info.si_signo = SIGFPE;
11550                     info.si_errno = 0;
11551                     info.si_code = si_code;
11552                     info._sifields._sigfault._addr
11553                         = ((CPUArchState *)cpu_env)->pc;
11554                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11555                                  QEMU_SI_FAULT, &info);
11556                 }
11557                 ret = 0;
11558             }
11559             break;
11560 
11561           /* case SSI_NVPAIRS:
11562              -- Used with SSIN_UACPROC to enable unaligned accesses.
11563              case SSI_IEEE_STATE_AT_SIGNAL:
11564              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11565              -- Not implemented in linux kernel
11566           */
11567         }
11568         return ret;
11569 #endif
11570 #ifdef TARGET_NR_osf_sigprocmask
11571     /* Alpha specific.  */
11572     case TARGET_NR_osf_sigprocmask:
11573         {
11574             abi_ulong mask;
11575             int how;
11576             sigset_t set, oldset;
11577 
11578             switch(arg1) {
11579             case TARGET_SIG_BLOCK:
11580                 how = SIG_BLOCK;
11581                 break;
11582             case TARGET_SIG_UNBLOCK:
11583                 how = SIG_UNBLOCK;
11584                 break;
11585             case TARGET_SIG_SETMASK:
11586                 how = SIG_SETMASK;
11587                 break;
11588             default:
11589                 return -TARGET_EINVAL;
11590             }
11591             mask = arg2;
11592             target_to_host_old_sigset(&set, &mask);
11593             ret = do_sigprocmask(how, &set, &oldset);
11594             if (!ret) {
11595                 host_to_target_old_sigset(&mask, &oldset);
11596                 ret = mask;
11597             }
11598         }
11599         return ret;
11600 #endif
11601 
11602 #ifdef TARGET_NR_getgid32
11603     case TARGET_NR_getgid32:
11604         return get_errno(getgid());
11605 #endif
11606 #ifdef TARGET_NR_geteuid32
11607     case TARGET_NR_geteuid32:
11608         return get_errno(geteuid());
11609 #endif
11610 #ifdef TARGET_NR_getegid32
11611     case TARGET_NR_getegid32:
11612         return get_errno(getegid());
11613 #endif
11614 #ifdef TARGET_NR_setreuid32
11615     case TARGET_NR_setreuid32:
11616         return get_errno(setreuid(arg1, arg2));
11617 #endif
11618 #ifdef TARGET_NR_setregid32
11619     case TARGET_NR_setregid32:
11620         return get_errno(setregid(arg1, arg2));
11621 #endif
11622 #ifdef TARGET_NR_getgroups32
11623     case TARGET_NR_getgroups32:
11624         {
11625             int gidsetsize = arg1;
11626             uint32_t *target_grouplist;
11627             gid_t *grouplist;
11628             int i;
11629 
11630             grouplist = alloca(gidsetsize * sizeof(gid_t));
11631             ret = get_errno(getgroups(gidsetsize, grouplist));
11632             if (gidsetsize == 0)
11633                 return ret;
11634             if (!is_error(ret)) {
11635                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11636                 if (!target_grouplist) {
11637                     return -TARGET_EFAULT;
11638                 }
11639                 for(i = 0;i < ret; i++)
11640                     target_grouplist[i] = tswap32(grouplist[i]);
11641                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11642             }
11643         }
11644         return ret;
11645 #endif
11646 #ifdef TARGET_NR_setgroups32
11647     case TARGET_NR_setgroups32:
11648         {
11649             int gidsetsize = arg1;
11650             uint32_t *target_grouplist;
11651             gid_t *grouplist;
11652             int i;
11653 
11654             grouplist = alloca(gidsetsize * sizeof(gid_t));
11655             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11656             if (!target_grouplist) {
11657                 return -TARGET_EFAULT;
11658             }
11659             for(i = 0;i < gidsetsize; i++)
11660                 grouplist[i] = tswap32(target_grouplist[i]);
11661             unlock_user(target_grouplist, arg2, 0);
11662             return get_errno(setgroups(gidsetsize, grouplist));
11663         }
11664 #endif
11665 #ifdef TARGET_NR_fchown32
11666     case TARGET_NR_fchown32:
11667         return get_errno(fchown(arg1, arg2, arg3));
11668 #endif
11669 #ifdef TARGET_NR_setresuid32
11670     case TARGET_NR_setresuid32:
11671         return get_errno(sys_setresuid(arg1, arg2, arg3));
11672 #endif
11673 #ifdef TARGET_NR_getresuid32
11674     case TARGET_NR_getresuid32:
11675         {
11676             uid_t ruid, euid, suid;
11677             ret = get_errno(getresuid(&ruid, &euid, &suid));
11678             if (!is_error(ret)) {
11679                 if (put_user_u32(ruid, arg1)
11680                     || put_user_u32(euid, arg2)
11681                     || put_user_u32(suid, arg3))
11682                     return -TARGET_EFAULT;
11683             }
11684         }
11685         return ret;
11686 #endif
11687 #ifdef TARGET_NR_setresgid32
11688     case TARGET_NR_setresgid32:
11689         return get_errno(sys_setresgid(arg1, arg2, arg3));
11690 #endif
11691 #ifdef TARGET_NR_getresgid32
11692     case TARGET_NR_getresgid32:
11693         {
11694             gid_t rgid, egid, sgid;
11695             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11696             if (!is_error(ret)) {
11697                 if (put_user_u32(rgid, arg1)
11698                     || put_user_u32(egid, arg2)
11699                     || put_user_u32(sgid, arg3))
11700                     return -TARGET_EFAULT;
11701             }
11702         }
11703         return ret;
11704 #endif
11705 #ifdef TARGET_NR_chown32
11706     case TARGET_NR_chown32:
11707         if (!(p = lock_user_string(arg1)))
11708             return -TARGET_EFAULT;
11709         ret = get_errno(chown(p, arg2, arg3));
11710         unlock_user(p, arg1, 0);
11711         return ret;
11712 #endif
11713 #ifdef TARGET_NR_setuid32
11714     case TARGET_NR_setuid32:
11715         return get_errno(sys_setuid(arg1));
11716 #endif
11717 #ifdef TARGET_NR_setgid32
11718     case TARGET_NR_setgid32:
11719         return get_errno(sys_setgid(arg1));
11720 #endif
11721 #ifdef TARGET_NR_setfsuid32
11722     case TARGET_NR_setfsuid32:
11723         return get_errno(setfsuid(arg1));
11724 #endif
11725 #ifdef TARGET_NR_setfsgid32
11726     case TARGET_NR_setfsgid32:
11727         return get_errno(setfsgid(arg1));
11728 #endif
11729 #ifdef TARGET_NR_mincore
11730     case TARGET_NR_mincore:
11731         {
11732             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11733             if (!a) {
11734                 return -TARGET_ENOMEM;
11735             }
11736             p = lock_user_string(arg3);
11737             if (!p) {
11738                 ret = -TARGET_EFAULT;
11739             } else {
11740                 ret = get_errno(mincore(a, arg2, p));
11741                 unlock_user(p, arg3, ret);
11742             }
11743             unlock_user(a, arg1, 0);
11744         }
11745         return ret;
11746 #endif
11747 #ifdef TARGET_NR_arm_fadvise64_64
11748     case TARGET_NR_arm_fadvise64_64:
11749         /* arm_fadvise64_64 looks like fadvise64_64 but
11750          * with different argument order: fd, advice, offset, len
11751          * rather than the usual fd, offset, len, advice.
11752          * Note that offset and len are both 64-bit so appear as
11753          * pairs of 32-bit registers.
11754          */
11755         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11756                             target_offset64(arg5, arg6), arg2);
11757         return -host_to_target_errno(ret);
11758 #endif
11759 
11760 #if TARGET_ABI_BITS == 32
11761 
11762 #ifdef TARGET_NR_fadvise64_64
11763     case TARGET_NR_fadvise64_64:
11764 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11765         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11766         ret = arg2;
11767         arg2 = arg3;
11768         arg3 = arg4;
11769         arg4 = arg5;
11770         arg5 = arg6;
11771         arg6 = ret;
11772 #else
11773         /* 6 args: fd, offset (high, low), len (high, low), advice */
11774         if (regpairs_aligned(cpu_env, num)) {
11775             /* offset is in (3,4), len in (5,6) and advice in 7 */
11776             arg2 = arg3;
11777             arg3 = arg4;
11778             arg4 = arg5;
11779             arg5 = arg6;
11780             arg6 = arg7;
11781         }
11782 #endif
11783         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11784                             target_offset64(arg4, arg5), arg6);
11785         return -host_to_target_errno(ret);
11786 #endif
11787 
11788 #ifdef TARGET_NR_fadvise64
11789     case TARGET_NR_fadvise64:
11790         /* 5 args: fd, offset (high, low), len, advice */
11791         if (regpairs_aligned(cpu_env, num)) {
11792             /* offset is in (3,4), len in 5 and advice in 6 */
11793             arg2 = arg3;
11794             arg3 = arg4;
11795             arg4 = arg5;
11796             arg5 = arg6;
11797         }
11798         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11799         return -host_to_target_errno(ret);
11800 #endif
11801 
11802 #else /* not a 32-bit ABI */
11803 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11804 #ifdef TARGET_NR_fadvise64_64
11805     case TARGET_NR_fadvise64_64:
11806 #endif
11807 #ifdef TARGET_NR_fadvise64
11808     case TARGET_NR_fadvise64:
11809 #endif
11810 #ifdef TARGET_S390X
11811         switch (arg4) {
11812         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11813         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11814         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11815         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11816         default: break;
11817         }
11818 #endif
11819         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11820 #endif
11821 #endif /* end of 64-bit ABI fadvise handling */
11822 
11823 #ifdef TARGET_NR_madvise
11824     case TARGET_NR_madvise:
11825         /* A straight passthrough may not be safe because qemu sometimes
11826            turns private file-backed mappings into anonymous mappings.
11827            This will break MADV_DONTNEED.
11828            This is a hint, so ignoring and returning success is ok.  */
11829         return 0;
11830 #endif
11831 #ifdef TARGET_NR_fcntl64
11832     case TARGET_NR_fcntl64:
11833     {
11834         int cmd;
11835         struct flock64 fl;
11836         from_flock64_fn *copyfrom = copy_from_user_flock64;
11837         to_flock64_fn *copyto = copy_to_user_flock64;
11838 
11839 #ifdef TARGET_ARM
11840         if (!((CPUARMState *)cpu_env)->eabi) {
11841             copyfrom = copy_from_user_oabi_flock64;
11842             copyto = copy_to_user_oabi_flock64;
11843         }
11844 #endif
11845 
11846         cmd = target_to_host_fcntl_cmd(arg2);
11847         if (cmd == -TARGET_EINVAL) {
11848             return cmd;
11849         }
11850 
11851         switch(arg2) {
11852         case TARGET_F_GETLK64:
11853             ret = copyfrom(&fl, arg3);
11854             if (ret) {
11855                 break;
11856             }
11857             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11858             if (ret == 0) {
11859                 ret = copyto(arg3, &fl);
11860             }
11861 	    break;
11862 
11863         case TARGET_F_SETLK64:
11864         case TARGET_F_SETLKW64:
11865             ret = copyfrom(&fl, arg3);
11866             if (ret) {
11867                 break;
11868             }
11869             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11870 	    break;
11871         default:
11872             ret = do_fcntl(arg1, arg2, arg3);
11873             break;
11874         }
11875         return ret;
11876     }
11877 #endif
11878 #ifdef TARGET_NR_cacheflush
11879     case TARGET_NR_cacheflush:
11880         /* self-modifying code is handled automatically, so nothing needed */
11881         return 0;
11882 #endif
11883 #ifdef TARGET_NR_getpagesize
11884     case TARGET_NR_getpagesize:
11885         return TARGET_PAGE_SIZE;
11886 #endif
11887     case TARGET_NR_gettid:
11888         return get_errno(sys_gettid());
11889 #ifdef TARGET_NR_readahead
11890     case TARGET_NR_readahead:
11891 #if TARGET_ABI_BITS == 32
11892         if (regpairs_aligned(cpu_env, num)) {
11893             arg2 = arg3;
11894             arg3 = arg4;
11895             arg4 = arg5;
11896         }
11897         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11898 #else
11899         ret = get_errno(readahead(arg1, arg2, arg3));
11900 #endif
11901         return ret;
11902 #endif
11903 #ifdef CONFIG_ATTR
11904 #ifdef TARGET_NR_setxattr
11905     case TARGET_NR_listxattr:
11906     case TARGET_NR_llistxattr:
11907     {
11908         void *p, *b = 0;
11909         if (arg2) {
11910             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11911             if (!b) {
11912                 return -TARGET_EFAULT;
11913             }
11914         }
11915         p = lock_user_string(arg1);
11916         if (p) {
11917             if (num == TARGET_NR_listxattr) {
11918                 ret = get_errno(listxattr(p, b, arg3));
11919             } else {
11920                 ret = get_errno(llistxattr(p, b, arg3));
11921             }
11922         } else {
11923             ret = -TARGET_EFAULT;
11924         }
11925         unlock_user(p, arg1, 0);
11926         unlock_user(b, arg2, arg3);
11927         return ret;
11928     }
11929     case TARGET_NR_flistxattr:
11930     {
11931         void *b = 0;
11932         if (arg2) {
11933             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11934             if (!b) {
11935                 return -TARGET_EFAULT;
11936             }
11937         }
11938         ret = get_errno(flistxattr(arg1, b, arg3));
11939         unlock_user(b, arg2, arg3);
11940         return ret;
11941     }
11942     case TARGET_NR_setxattr:
11943     case TARGET_NR_lsetxattr:
11944         {
11945             void *p, *n, *v = 0;
11946             if (arg3) {
11947                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11948                 if (!v) {
11949                     return -TARGET_EFAULT;
11950                 }
11951             }
11952             p = lock_user_string(arg1);
11953             n = lock_user_string(arg2);
11954             if (p && n) {
11955                 if (num == TARGET_NR_setxattr) {
11956                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11957                 } else {
11958                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11959                 }
11960             } else {
11961                 ret = -TARGET_EFAULT;
11962             }
11963             unlock_user(p, arg1, 0);
11964             unlock_user(n, arg2, 0);
11965             unlock_user(v, arg3, 0);
11966         }
11967         return ret;
11968     case TARGET_NR_fsetxattr:
11969         {
11970             void *n, *v = 0;
11971             if (arg3) {
11972                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11973                 if (!v) {
11974                     return -TARGET_EFAULT;
11975                 }
11976             }
11977             n = lock_user_string(arg2);
11978             if (n) {
11979                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11980             } else {
11981                 ret = -TARGET_EFAULT;
11982             }
11983             unlock_user(n, arg2, 0);
11984             unlock_user(v, arg3, 0);
11985         }
11986         return ret;
11987     case TARGET_NR_getxattr:
11988     case TARGET_NR_lgetxattr:
11989         {
11990             void *p, *n, *v = 0;
11991             if (arg3) {
11992                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11993                 if (!v) {
11994                     return -TARGET_EFAULT;
11995                 }
11996             }
11997             p = lock_user_string(arg1);
11998             n = lock_user_string(arg2);
11999             if (p && n) {
12000                 if (num == TARGET_NR_getxattr) {
12001                     ret = get_errno(getxattr(p, n, v, arg4));
12002                 } else {
12003                     ret = get_errno(lgetxattr(p, n, v, arg4));
12004                 }
12005             } else {
12006                 ret = -TARGET_EFAULT;
12007             }
12008             unlock_user(p, arg1, 0);
12009             unlock_user(n, arg2, 0);
12010             unlock_user(v, arg3, arg4);
12011         }
12012         return ret;
12013     case TARGET_NR_fgetxattr:
12014         {
12015             void *n, *v = 0;
12016             if (arg3) {
12017                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12018                 if (!v) {
12019                     return -TARGET_EFAULT;
12020                 }
12021             }
12022             n = lock_user_string(arg2);
12023             if (n) {
12024                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12025             } else {
12026                 ret = -TARGET_EFAULT;
12027             }
12028             unlock_user(n, arg2, 0);
12029             unlock_user(v, arg3, arg4);
12030         }
12031         return ret;
12032     case TARGET_NR_removexattr:
12033     case TARGET_NR_lremovexattr:
12034         {
12035             void *p, *n;
12036             p = lock_user_string(arg1);
12037             n = lock_user_string(arg2);
12038             if (p && n) {
12039                 if (num == TARGET_NR_removexattr) {
12040                     ret = get_errno(removexattr(p, n));
12041                 } else {
12042                     ret = get_errno(lremovexattr(p, n));
12043                 }
12044             } else {
12045                 ret = -TARGET_EFAULT;
12046             }
12047             unlock_user(p, arg1, 0);
12048             unlock_user(n, arg2, 0);
12049         }
12050         return ret;
12051     case TARGET_NR_fremovexattr:
12052         {
12053             void *n;
12054             n = lock_user_string(arg2);
12055             if (n) {
12056                 ret = get_errno(fremovexattr(arg1, n));
12057             } else {
12058                 ret = -TARGET_EFAULT;
12059             }
12060             unlock_user(n, arg2, 0);
12061         }
12062         return ret;
12063 #endif
12064 #endif /* CONFIG_ATTR */
12065 #ifdef TARGET_NR_set_thread_area
12066     case TARGET_NR_set_thread_area:
12067 #if defined(TARGET_MIPS)
12068       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12069       return 0;
12070 #elif defined(TARGET_CRIS)
12071       if (arg1 & 0xff)
12072           ret = -TARGET_EINVAL;
12073       else {
12074           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12075           ret = 0;
12076       }
12077       return ret;
12078 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12079       return do_set_thread_area(cpu_env, arg1);
12080 #elif defined(TARGET_M68K)
12081       {
12082           TaskState *ts = cpu->opaque;
12083           ts->tp_value = arg1;
12084           return 0;
12085       }
12086 #else
12087       return -TARGET_ENOSYS;
12088 #endif
12089 #endif
12090 #ifdef TARGET_NR_get_thread_area
12091     case TARGET_NR_get_thread_area:
12092 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12093         return do_get_thread_area(cpu_env, arg1);
12094 #elif defined(TARGET_M68K)
12095         {
12096             TaskState *ts = cpu->opaque;
12097             return ts->tp_value;
12098         }
12099 #else
12100         return -TARGET_ENOSYS;
12101 #endif
12102 #endif
12103 #ifdef TARGET_NR_getdomainname
12104     case TARGET_NR_getdomainname:
12105         return -TARGET_ENOSYS;
12106 #endif
12107 
12108 #ifdef TARGET_NR_clock_settime
12109     case TARGET_NR_clock_settime:
12110     {
12111         struct timespec ts;
12112 
12113         ret = target_to_host_timespec(&ts, arg2);
12114         if (!is_error(ret)) {
12115             ret = get_errno(clock_settime(arg1, &ts));
12116         }
12117         return ret;
12118     }
12119 #endif
12120 #ifdef TARGET_NR_clock_settime64
12121     case TARGET_NR_clock_settime64:
12122     {
12123         struct timespec ts;
12124 
12125         ret = target_to_host_timespec64(&ts, arg2);
12126         if (!is_error(ret)) {
12127             ret = get_errno(clock_settime(arg1, &ts));
12128         }
12129         return ret;
12130     }
12131 #endif
12132 #ifdef TARGET_NR_clock_gettime
12133     case TARGET_NR_clock_gettime:
12134     {
12135         struct timespec ts;
12136         ret = get_errno(clock_gettime(arg1, &ts));
12137         if (!is_error(ret)) {
12138             ret = host_to_target_timespec(arg2, &ts);
12139         }
12140         return ret;
12141     }
12142 #endif
12143 #ifdef TARGET_NR_clock_gettime64
12144     case TARGET_NR_clock_gettime64:
12145     {
12146         struct timespec ts;
12147         ret = get_errno(clock_gettime(arg1, &ts));
12148         if (!is_error(ret)) {
12149             ret = host_to_target_timespec64(arg2, &ts);
12150         }
12151         return ret;
12152     }
12153 #endif
12154 #ifdef TARGET_NR_clock_getres
12155     case TARGET_NR_clock_getres:
12156     {
12157         struct timespec ts;
12158         ret = get_errno(clock_getres(arg1, &ts));
12159         if (!is_error(ret)) {
12160             host_to_target_timespec(arg2, &ts);
12161         }
12162         return ret;
12163     }
12164 #endif
12165 #ifdef TARGET_NR_clock_getres_time64
12166     case TARGET_NR_clock_getres_time64:
12167     {
12168         struct timespec ts;
12169         ret = get_errno(clock_getres(arg1, &ts));
12170         if (!is_error(ret)) {
12171             host_to_target_timespec64(arg2, &ts);
12172         }
12173         return ret;
12174     }
12175 #endif
12176 #ifdef TARGET_NR_clock_nanosleep
12177     case TARGET_NR_clock_nanosleep:
12178     {
12179         struct timespec ts;
12180         if (target_to_host_timespec(&ts, arg3)) {
12181             return -TARGET_EFAULT;
12182         }
12183         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12184                                              &ts, arg4 ? &ts : NULL));
12185         /*
12186          * if the call is interrupted by a signal handler, it fails
12187          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12188          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12189          */
12190         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12191             host_to_target_timespec(arg4, &ts)) {
12192               return -TARGET_EFAULT;
12193         }
12194 
12195         return ret;
12196     }
12197 #endif
12198 #ifdef TARGET_NR_clock_nanosleep_time64
12199     case TARGET_NR_clock_nanosleep_time64:
12200     {
12201         struct timespec ts;
12202 
12203         if (target_to_host_timespec64(&ts, arg3)) {
12204             return -TARGET_EFAULT;
12205         }
12206 
12207         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12208                                              &ts, arg4 ? &ts : NULL));
12209 
12210         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12211             host_to_target_timespec64(arg4, &ts)) {
12212             return -TARGET_EFAULT;
12213         }
12214         return ret;
12215     }
12216 #endif
12217 
12218 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12219     case TARGET_NR_set_tid_address:
12220         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12221 #endif
12222 
12223     case TARGET_NR_tkill:
12224         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12225 
12226     case TARGET_NR_tgkill:
12227         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12228                          target_to_host_signal(arg3)));
12229 
12230 #ifdef TARGET_NR_set_robust_list
12231     case TARGET_NR_set_robust_list:
12232     case TARGET_NR_get_robust_list:
12233         /* The ABI for supporting robust futexes has userspace pass
12234          * the kernel a pointer to a linked list which is updated by
12235          * userspace after the syscall; the list is walked by the kernel
12236          * when the thread exits. Since the linked list in QEMU guest
12237          * memory isn't a valid linked list for the host and we have
12238          * no way to reliably intercept the thread-death event, we can't
12239          * support these. Silently return ENOSYS so that guest userspace
12240          * falls back to a non-robust futex implementation (which should
12241          * be OK except in the corner case of the guest crashing while
12242          * holding a mutex that is shared with another process via
12243          * shared memory).
12244          */
12245         return -TARGET_ENOSYS;
12246 #endif
12247 
12248 #if defined(TARGET_NR_utimensat)
12249     case TARGET_NR_utimensat:
12250         {
12251             struct timespec *tsp, ts[2];
12252             if (!arg3) {
12253                 tsp = NULL;
12254             } else {
12255                 if (target_to_host_timespec(ts, arg3)) {
12256                     return -TARGET_EFAULT;
12257                 }
12258                 if (target_to_host_timespec(ts + 1, arg3 +
12259                                             sizeof(struct target_timespec))) {
12260                     return -TARGET_EFAULT;
12261                 }
12262                 tsp = ts;
12263             }
12264             if (!arg2)
12265                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12266             else {
12267                 if (!(p = lock_user_string(arg2))) {
12268                     return -TARGET_EFAULT;
12269                 }
12270                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12271                 unlock_user(p, arg2, 0);
12272             }
12273         }
12274         return ret;
12275 #endif
12276 #ifdef TARGET_NR_utimensat_time64
12277     case TARGET_NR_utimensat_time64:
12278         {
12279             struct timespec *tsp, ts[2];
12280             if (!arg3) {
12281                 tsp = NULL;
12282             } else {
12283                 if (target_to_host_timespec64(ts, arg3)) {
12284                     return -TARGET_EFAULT;
12285                 }
12286                 if (target_to_host_timespec64(ts + 1, arg3 +
12287                                      sizeof(struct target__kernel_timespec))) {
12288                     return -TARGET_EFAULT;
12289                 }
12290                 tsp = ts;
12291             }
12292             if (!arg2)
12293                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12294             else {
12295                 p = lock_user_string(arg2);
12296                 if (!p) {
12297                     return -TARGET_EFAULT;
12298                 }
12299                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12300                 unlock_user(p, arg2, 0);
12301             }
12302         }
12303         return ret;
12304 #endif
12305 #ifdef TARGET_NR_futex
12306     case TARGET_NR_futex:
12307         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12308 #endif
12309 #ifdef TARGET_NR_futex_time64
12310     case TARGET_NR_futex_time64:
12311         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12312 #endif
12313 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12314     case TARGET_NR_inotify_init:
12315         ret = get_errno(sys_inotify_init());
12316         if (ret >= 0) {
12317             fd_trans_register(ret, &target_inotify_trans);
12318         }
12319         return ret;
12320 #endif
12321 #ifdef CONFIG_INOTIFY1
12322 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12323     case TARGET_NR_inotify_init1:
12324         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12325                                           fcntl_flags_tbl)));
12326         if (ret >= 0) {
12327             fd_trans_register(ret, &target_inotify_trans);
12328         }
12329         return ret;
12330 #endif
12331 #endif
12332 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12333     case TARGET_NR_inotify_add_watch:
12334         p = lock_user_string(arg2);
12335         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12336         unlock_user(p, arg2, 0);
12337         return ret;
12338 #endif
12339 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12340     case TARGET_NR_inotify_rm_watch:
12341         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12342 #endif
12343 
12344 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12345     case TARGET_NR_mq_open:
12346         {
12347             struct mq_attr posix_mq_attr;
12348             struct mq_attr *pposix_mq_attr;
12349             int host_flags;
12350 
12351             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12352             pposix_mq_attr = NULL;
12353             if (arg4) {
12354                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12355                     return -TARGET_EFAULT;
12356                 }
12357                 pposix_mq_attr = &posix_mq_attr;
12358             }
12359             p = lock_user_string(arg1 - 1);
12360             if (!p) {
12361                 return -TARGET_EFAULT;
12362             }
12363             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12364             unlock_user (p, arg1, 0);
12365         }
12366         return ret;
12367 
12368     case TARGET_NR_mq_unlink:
12369         p = lock_user_string(arg1 - 1);
12370         if (!p) {
12371             return -TARGET_EFAULT;
12372         }
12373         ret = get_errno(mq_unlink(p));
12374         unlock_user (p, arg1, 0);
12375         return ret;
12376 
12377 #ifdef TARGET_NR_mq_timedsend
12378     case TARGET_NR_mq_timedsend:
12379         {
12380             struct timespec ts;
12381 
12382             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12383             if (arg5 != 0) {
12384                 if (target_to_host_timespec(&ts, arg5)) {
12385                     return -TARGET_EFAULT;
12386                 }
12387                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12388                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12389                     return -TARGET_EFAULT;
12390                 }
12391             } else {
12392                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12393             }
12394             unlock_user (p, arg2, arg3);
12395         }
12396         return ret;
12397 #endif
12398 #ifdef TARGET_NR_mq_timedsend_time64
12399     case TARGET_NR_mq_timedsend_time64:
12400         {
12401             struct timespec ts;
12402 
12403             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12404             if (arg5 != 0) {
12405                 if (target_to_host_timespec64(&ts, arg5)) {
12406                     return -TARGET_EFAULT;
12407                 }
12408                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12409                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12410                     return -TARGET_EFAULT;
12411                 }
12412             } else {
12413                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12414             }
12415             unlock_user(p, arg2, arg3);
12416         }
12417         return ret;
12418 #endif
12419 
12420 #ifdef TARGET_NR_mq_timedreceive
12421     case TARGET_NR_mq_timedreceive:
12422         {
12423             struct timespec ts;
12424             unsigned int prio;
12425 
12426             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12427             if (arg5 != 0) {
12428                 if (target_to_host_timespec(&ts, arg5)) {
12429                     return -TARGET_EFAULT;
12430                 }
12431                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12432                                                      &prio, &ts));
12433                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12434                     return -TARGET_EFAULT;
12435                 }
12436             } else {
12437                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12438                                                      &prio, NULL));
12439             }
12440             unlock_user (p, arg2, arg3);
12441             if (arg4 != 0)
12442                 put_user_u32(prio, arg4);
12443         }
12444         return ret;
12445 #endif
12446 #ifdef TARGET_NR_mq_timedreceive_time64
12447     case TARGET_NR_mq_timedreceive_time64:
12448         {
12449             struct timespec ts;
12450             unsigned int prio;
12451 
12452             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12453             if (arg5 != 0) {
12454                 if (target_to_host_timespec64(&ts, arg5)) {
12455                     return -TARGET_EFAULT;
12456                 }
12457                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12458                                                      &prio, &ts));
12459                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12460                     return -TARGET_EFAULT;
12461                 }
12462             } else {
12463                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12464                                                      &prio, NULL));
12465             }
12466             unlock_user(p, arg2, arg3);
12467             if (arg4 != 0) {
12468                 put_user_u32(prio, arg4);
12469             }
12470         }
12471         return ret;
12472 #endif
12473 
12474     /* Not implemented for now... */
12475 /*     case TARGET_NR_mq_notify: */
12476 /*         break; */
12477 
12478     case TARGET_NR_mq_getsetattr:
12479         {
12480             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12481             ret = 0;
12482             if (arg2 != 0) {
12483                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12484                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12485                                            &posix_mq_attr_out));
12486             } else if (arg3 != 0) {
12487                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12488             }
12489             if (ret == 0 && arg3 != 0) {
12490                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12491             }
12492         }
12493         return ret;
12494 #endif
12495 
12496 #ifdef CONFIG_SPLICE
12497 #ifdef TARGET_NR_tee
12498     case TARGET_NR_tee:
12499         {
12500             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12501         }
12502         return ret;
12503 #endif
12504 #ifdef TARGET_NR_splice
12505     case TARGET_NR_splice:
12506         {
12507             loff_t loff_in, loff_out;
12508             loff_t *ploff_in = NULL, *ploff_out = NULL;
12509             if (arg2) {
12510                 if (get_user_u64(loff_in, arg2)) {
12511                     return -TARGET_EFAULT;
12512                 }
12513                 ploff_in = &loff_in;
12514             }
12515             if (arg4) {
12516                 if (get_user_u64(loff_out, arg4)) {
12517                     return -TARGET_EFAULT;
12518                 }
12519                 ploff_out = &loff_out;
12520             }
12521             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12522             if (arg2) {
12523                 if (put_user_u64(loff_in, arg2)) {
12524                     return -TARGET_EFAULT;
12525                 }
12526             }
12527             if (arg4) {
12528                 if (put_user_u64(loff_out, arg4)) {
12529                     return -TARGET_EFAULT;
12530                 }
12531             }
12532         }
12533         return ret;
12534 #endif
12535 #ifdef TARGET_NR_vmsplice
12536 	case TARGET_NR_vmsplice:
12537         {
12538             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12539             if (vec != NULL) {
12540                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12541                 unlock_iovec(vec, arg2, arg3, 0);
12542             } else {
12543                 ret = -host_to_target_errno(errno);
12544             }
12545         }
12546         return ret;
12547 #endif
12548 #endif /* CONFIG_SPLICE */
12549 #ifdef CONFIG_EVENTFD
12550 #if defined(TARGET_NR_eventfd)
12551     case TARGET_NR_eventfd:
12552         ret = get_errno(eventfd(arg1, 0));
12553         if (ret >= 0) {
12554             fd_trans_register(ret, &target_eventfd_trans);
12555         }
12556         return ret;
12557 #endif
12558 #if defined(TARGET_NR_eventfd2)
12559     case TARGET_NR_eventfd2:
12560     {
12561         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12562         if (arg2 & TARGET_O_NONBLOCK) {
12563             host_flags |= O_NONBLOCK;
12564         }
12565         if (arg2 & TARGET_O_CLOEXEC) {
12566             host_flags |= O_CLOEXEC;
12567         }
12568         ret = get_errno(eventfd(arg1, host_flags));
12569         if (ret >= 0) {
12570             fd_trans_register(ret, &target_eventfd_trans);
12571         }
12572         return ret;
12573     }
12574 #endif
12575 #endif /* CONFIG_EVENTFD  */
12576 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12577     case TARGET_NR_fallocate:
12578 #if TARGET_ABI_BITS == 32
12579         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12580                                   target_offset64(arg5, arg6)));
12581 #else
12582         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12583 #endif
12584         return ret;
12585 #endif
12586 #if defined(CONFIG_SYNC_FILE_RANGE)
12587 #if defined(TARGET_NR_sync_file_range)
12588     case TARGET_NR_sync_file_range:
12589 #if TARGET_ABI_BITS == 32
12590 #if defined(TARGET_MIPS)
12591         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12592                                         target_offset64(arg5, arg6), arg7));
12593 #else
12594         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12595                                         target_offset64(arg4, arg5), arg6));
12596 #endif /* !TARGET_MIPS */
12597 #else
12598         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12599 #endif
12600         return ret;
12601 #endif
12602 #if defined(TARGET_NR_sync_file_range2) || \
12603     defined(TARGET_NR_arm_sync_file_range)
12604 #if defined(TARGET_NR_sync_file_range2)
12605     case TARGET_NR_sync_file_range2:
12606 #endif
12607 #if defined(TARGET_NR_arm_sync_file_range)
12608     case TARGET_NR_arm_sync_file_range:
12609 #endif
12610         /* This is like sync_file_range but the arguments are reordered */
12611 #if TARGET_ABI_BITS == 32
12612         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12613                                         target_offset64(arg5, arg6), arg2));
12614 #else
12615         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12616 #endif
12617         return ret;
12618 #endif
12619 #endif
12620 #if defined(TARGET_NR_signalfd4)
12621     case TARGET_NR_signalfd4:
12622         return do_signalfd4(arg1, arg2, arg4);
12623 #endif
12624 #if defined(TARGET_NR_signalfd)
12625     case TARGET_NR_signalfd:
12626         return do_signalfd4(arg1, arg2, 0);
12627 #endif
12628 #if defined(CONFIG_EPOLL)
12629 #if defined(TARGET_NR_epoll_create)
12630     case TARGET_NR_epoll_create:
12631         return get_errno(epoll_create(arg1));
12632 #endif
12633 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12634     case TARGET_NR_epoll_create1:
12635         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12636 #endif
12637 #if defined(TARGET_NR_epoll_ctl)
12638     case TARGET_NR_epoll_ctl:
12639     {
12640         struct epoll_event ep;
12641         struct epoll_event *epp = 0;
12642         if (arg4) {
12643             if (arg2 != EPOLL_CTL_DEL) {
12644                 struct target_epoll_event *target_ep;
12645                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12646                     return -TARGET_EFAULT;
12647                 }
12648                 ep.events = tswap32(target_ep->events);
12649                 /*
12650                  * The epoll_data_t union is just opaque data to the kernel,
12651                  * so we transfer all 64 bits across and need not worry what
12652                  * actual data type it is.
12653                  */
12654                 ep.data.u64 = tswap64(target_ep->data.u64);
12655                 unlock_user_struct(target_ep, arg4, 0);
12656             }
12657             /*
12658              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12659              * non-null pointer, even though this argument is ignored.
12660              *
12661              */
12662             epp = &ep;
12663         }
12664         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12665     }
12666 #endif
12667 
12668 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12669 #if defined(TARGET_NR_epoll_wait)
12670     case TARGET_NR_epoll_wait:
12671 #endif
12672 #if defined(TARGET_NR_epoll_pwait)
12673     case TARGET_NR_epoll_pwait:
12674 #endif
12675     {
12676         struct target_epoll_event *target_ep;
12677         struct epoll_event *ep;
12678         int epfd = arg1;
12679         int maxevents = arg3;
12680         int timeout = arg4;
12681 
12682         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12683             return -TARGET_EINVAL;
12684         }
12685 
12686         target_ep = lock_user(VERIFY_WRITE, arg2,
12687                               maxevents * sizeof(struct target_epoll_event), 1);
12688         if (!target_ep) {
12689             return -TARGET_EFAULT;
12690         }
12691 
12692         ep = g_try_new(struct epoll_event, maxevents);
12693         if (!ep) {
12694             unlock_user(target_ep, arg2, 0);
12695             return -TARGET_ENOMEM;
12696         }
12697 
12698         switch (num) {
12699 #if defined(TARGET_NR_epoll_pwait)
12700         case TARGET_NR_epoll_pwait:
12701         {
12702             target_sigset_t *target_set;
12703             sigset_t _set, *set = &_set;
12704 
12705             if (arg5) {
12706                 if (arg6 != sizeof(target_sigset_t)) {
12707                     ret = -TARGET_EINVAL;
12708                     break;
12709                 }
12710 
12711                 target_set = lock_user(VERIFY_READ, arg5,
12712                                        sizeof(target_sigset_t), 1);
12713                 if (!target_set) {
12714                     ret = -TARGET_EFAULT;
12715                     break;
12716                 }
12717                 target_to_host_sigset(set, target_set);
12718                 unlock_user(target_set, arg5, 0);
12719             } else {
12720                 set = NULL;
12721             }
12722 
12723             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12724                                              set, SIGSET_T_SIZE));
12725             break;
12726         }
12727 #endif
12728 #if defined(TARGET_NR_epoll_wait)
12729         case TARGET_NR_epoll_wait:
12730             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12731                                              NULL, 0));
12732             break;
12733 #endif
12734         default:
12735             ret = -TARGET_ENOSYS;
12736         }
12737         if (!is_error(ret)) {
12738             int i;
12739             for (i = 0; i < ret; i++) {
12740                 target_ep[i].events = tswap32(ep[i].events);
12741                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12742             }
12743             unlock_user(target_ep, arg2,
12744                         ret * sizeof(struct target_epoll_event));
12745         } else {
12746             unlock_user(target_ep, arg2, 0);
12747         }
12748         g_free(ep);
12749         return ret;
12750     }
12751 #endif
12752 #endif
12753 #ifdef TARGET_NR_prlimit64
12754     case TARGET_NR_prlimit64:
12755     {
12756         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12757         struct target_rlimit64 *target_rnew, *target_rold;
12758         struct host_rlimit64 rnew, rold, *rnewp = 0;
12759         int resource = target_to_host_resource(arg2);
12760 
12761         if (arg3 && (resource != RLIMIT_AS &&
12762                      resource != RLIMIT_DATA &&
12763                      resource != RLIMIT_STACK)) {
12764             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12765                 return -TARGET_EFAULT;
12766             }
12767             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12768             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12769             unlock_user_struct(target_rnew, arg3, 0);
12770             rnewp = &rnew;
12771         }
12772 
12773         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12774         if (!is_error(ret) && arg4) {
12775             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12776                 return -TARGET_EFAULT;
12777             }
12778             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12779             target_rold->rlim_max = tswap64(rold.rlim_max);
12780             unlock_user_struct(target_rold, arg4, 1);
12781         }
12782         return ret;
12783     }
12784 #endif
12785 #ifdef TARGET_NR_gethostname
12786     case TARGET_NR_gethostname:
12787     {
12788         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12789         if (name) {
12790             ret = get_errno(gethostname(name, arg2));
12791             unlock_user(name, arg1, arg2);
12792         } else {
12793             ret = -TARGET_EFAULT;
12794         }
12795         return ret;
12796     }
12797 #endif
12798 #ifdef TARGET_NR_atomic_cmpxchg_32
12799     case TARGET_NR_atomic_cmpxchg_32:
12800     {
12801         /* should use start_exclusive from main.c */
12802         abi_ulong mem_value;
12803         if (get_user_u32(mem_value, arg6)) {
12804             target_siginfo_t info;
12805             info.si_signo = SIGSEGV;
12806             info.si_errno = 0;
12807             info.si_code = TARGET_SEGV_MAPERR;
12808             info._sifields._sigfault._addr = arg6;
12809             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12810                          QEMU_SI_FAULT, &info);
12811             ret = 0xdeadbeef;
12812 
12813         }
12814         if (mem_value == arg2)
12815             put_user_u32(arg1, arg6);
12816         return mem_value;
12817     }
12818 #endif
12819 #ifdef TARGET_NR_atomic_barrier
12820     case TARGET_NR_atomic_barrier:
12821         /* Like the kernel implementation and the
12822            qemu arm barrier, no-op this? */
12823         return 0;
12824 #endif
12825 
12826 #ifdef TARGET_NR_timer_create
12827     case TARGET_NR_timer_create:
12828     {
12829         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12830 
12831         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12832 
12833         int clkid = arg1;
12834         int timer_index = next_free_host_timer();
12835 
12836         if (timer_index < 0) {
12837             ret = -TARGET_EAGAIN;
12838         } else {
12839             timer_t *phtimer = g_posix_timers  + timer_index;
12840 
12841             if (arg2) {
12842                 phost_sevp = &host_sevp;
12843                 ret = target_to_host_sigevent(phost_sevp, arg2);
12844                 if (ret != 0) {
12845                     return ret;
12846                 }
12847             }
12848 
12849             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12850             if (ret) {
12851                 phtimer = NULL;
12852             } else {
12853                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12854                     return -TARGET_EFAULT;
12855                 }
12856             }
12857         }
12858         return ret;
12859     }
12860 #endif
12861 
12862 #ifdef TARGET_NR_timer_settime
12863     case TARGET_NR_timer_settime:
12864     {
12865         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12866          * struct itimerspec * old_value */
12867         target_timer_t timerid = get_timer_id(arg1);
12868 
12869         if (timerid < 0) {
12870             ret = timerid;
12871         } else if (arg3 == 0) {
12872             ret = -TARGET_EINVAL;
12873         } else {
12874             timer_t htimer = g_posix_timers[timerid];
12875             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12876 
12877             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12878                 return -TARGET_EFAULT;
12879             }
12880             ret = get_errno(
12881                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12882             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12883                 return -TARGET_EFAULT;
12884             }
12885         }
12886         return ret;
12887     }
12888 #endif
12889 
12890 #ifdef TARGET_NR_timer_settime64
12891     case TARGET_NR_timer_settime64:
12892     {
12893         target_timer_t timerid = get_timer_id(arg1);
12894 
12895         if (timerid < 0) {
12896             ret = timerid;
12897         } else if (arg3 == 0) {
12898             ret = -TARGET_EINVAL;
12899         } else {
12900             timer_t htimer = g_posix_timers[timerid];
12901             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12902 
12903             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12904                 return -TARGET_EFAULT;
12905             }
12906             ret = get_errno(
12907                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12908             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12909                 return -TARGET_EFAULT;
12910             }
12911         }
12912         return ret;
12913     }
12914 #endif
12915 
12916 #ifdef TARGET_NR_timer_gettime
12917     case TARGET_NR_timer_gettime:
12918     {
12919         /* args: timer_t timerid, struct itimerspec *curr_value */
12920         target_timer_t timerid = get_timer_id(arg1);
12921 
12922         if (timerid < 0) {
12923             ret = timerid;
12924         } else if (!arg2) {
12925             ret = -TARGET_EFAULT;
12926         } else {
12927             timer_t htimer = g_posix_timers[timerid];
12928             struct itimerspec hspec;
12929             ret = get_errno(timer_gettime(htimer, &hspec));
12930 
12931             if (host_to_target_itimerspec(arg2, &hspec)) {
12932                 ret = -TARGET_EFAULT;
12933             }
12934         }
12935         return ret;
12936     }
12937 #endif
12938 
12939 #ifdef TARGET_NR_timer_gettime64
12940     case TARGET_NR_timer_gettime64:
12941     {
12942         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12943         target_timer_t timerid = get_timer_id(arg1);
12944 
12945         if (timerid < 0) {
12946             ret = timerid;
12947         } else if (!arg2) {
12948             ret = -TARGET_EFAULT;
12949         } else {
12950             timer_t htimer = g_posix_timers[timerid];
12951             struct itimerspec hspec;
12952             ret = get_errno(timer_gettime(htimer, &hspec));
12953 
12954             if (host_to_target_itimerspec64(arg2, &hspec)) {
12955                 ret = -TARGET_EFAULT;
12956             }
12957         }
12958         return ret;
12959     }
12960 #endif
12961 
12962 #ifdef TARGET_NR_timer_getoverrun
12963     case TARGET_NR_timer_getoverrun:
12964     {
12965         /* args: timer_t timerid */
12966         target_timer_t timerid = get_timer_id(arg1);
12967 
12968         if (timerid < 0) {
12969             ret = timerid;
12970         } else {
12971             timer_t htimer = g_posix_timers[timerid];
12972             ret = get_errno(timer_getoverrun(htimer));
12973         }
12974         return ret;
12975     }
12976 #endif
12977 
12978 #ifdef TARGET_NR_timer_delete
12979     case TARGET_NR_timer_delete:
12980     {
12981         /* args: timer_t timerid */
12982         target_timer_t timerid = get_timer_id(arg1);
12983 
12984         if (timerid < 0) {
12985             ret = timerid;
12986         } else {
12987             timer_t htimer = g_posix_timers[timerid];
12988             ret = get_errno(timer_delete(htimer));
12989             g_posix_timers[timerid] = 0;
12990         }
12991         return ret;
12992     }
12993 #endif
12994 
12995 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12996     case TARGET_NR_timerfd_create:
12997         return get_errno(timerfd_create(arg1,
12998                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12999 #endif
13000 
13001 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13002     case TARGET_NR_timerfd_gettime:
13003         {
13004             struct itimerspec its_curr;
13005 
13006             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13007 
13008             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13009                 return -TARGET_EFAULT;
13010             }
13011         }
13012         return ret;
13013 #endif
13014 
13015 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13016     case TARGET_NR_timerfd_gettime64:
13017         {
13018             struct itimerspec its_curr;
13019 
13020             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13021 
13022             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13023                 return -TARGET_EFAULT;
13024             }
13025         }
13026         return ret;
13027 #endif
13028 
13029 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13030     case TARGET_NR_timerfd_settime:
13031         {
13032             struct itimerspec its_new, its_old, *p_new;
13033 
13034             if (arg3) {
13035                 if (target_to_host_itimerspec(&its_new, arg3)) {
13036                     return -TARGET_EFAULT;
13037                 }
13038                 p_new = &its_new;
13039             } else {
13040                 p_new = NULL;
13041             }
13042 
13043             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13044 
13045             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13046                 return -TARGET_EFAULT;
13047             }
13048         }
13049         return ret;
13050 #endif
13051 
13052 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13053     case TARGET_NR_timerfd_settime64:
13054         {
13055             struct itimerspec its_new, its_old, *p_new;
13056 
13057             if (arg3) {
13058                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13059                     return -TARGET_EFAULT;
13060                 }
13061                 p_new = &its_new;
13062             } else {
13063                 p_new = NULL;
13064             }
13065 
13066             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13067 
13068             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13069                 return -TARGET_EFAULT;
13070             }
13071         }
13072         return ret;
13073 #endif
13074 
13075 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13076     case TARGET_NR_ioprio_get:
13077         return get_errno(ioprio_get(arg1, arg2));
13078 #endif
13079 
13080 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13081     case TARGET_NR_ioprio_set:
13082         return get_errno(ioprio_set(arg1, arg2, arg3));
13083 #endif
13084 
13085 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13086     case TARGET_NR_setns:
13087         return get_errno(setns(arg1, arg2));
13088 #endif
13089 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13090     case TARGET_NR_unshare:
13091         return get_errno(unshare(arg1));
13092 #endif
13093 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13094     case TARGET_NR_kcmp:
13095         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13096 #endif
13097 #ifdef TARGET_NR_swapcontext
13098     case TARGET_NR_swapcontext:
13099         /* PowerPC specific.  */
13100         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13101 #endif
13102 #ifdef TARGET_NR_memfd_create
13103     case TARGET_NR_memfd_create:
13104         p = lock_user_string(arg1);
13105         if (!p) {
13106             return -TARGET_EFAULT;
13107         }
13108         ret = get_errno(memfd_create(p, arg2));
13109         fd_trans_unregister(ret);
13110         unlock_user(p, arg1, 0);
13111         return ret;
13112 #endif
13113 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13114     case TARGET_NR_membarrier:
13115         return get_errno(membarrier(arg1, arg2));
13116 #endif
13117 
13118 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13119     case TARGET_NR_copy_file_range:
13120         {
13121             loff_t inoff, outoff;
13122             loff_t *pinoff = NULL, *poutoff = NULL;
13123 
13124             if (arg2) {
13125                 if (get_user_u64(inoff, arg2)) {
13126                     return -TARGET_EFAULT;
13127                 }
13128                 pinoff = &inoff;
13129             }
13130             if (arg4) {
13131                 if (get_user_u64(outoff, arg4)) {
13132                     return -TARGET_EFAULT;
13133                 }
13134                 poutoff = &outoff;
13135             }
13136             /* Do not sign-extend the count parameter. */
13137             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13138                                                  (abi_ulong)arg5, arg6));
13139             if (!is_error(ret) && ret > 0) {
13140                 if (arg2) {
13141                     if (put_user_u64(inoff, arg2)) {
13142                         return -TARGET_EFAULT;
13143                     }
13144                 }
13145                 if (arg4) {
13146                     if (put_user_u64(outoff, arg4)) {
13147                         return -TARGET_EFAULT;
13148                     }
13149                 }
13150             }
13151         }
13152         return ret;
13153 #endif
13154 
13155 #if defined(TARGET_NR_pivot_root)
13156     case TARGET_NR_pivot_root:
13157         {
13158             void *p2;
13159             p = lock_user_string(arg1); /* new_root */
13160             p2 = lock_user_string(arg2); /* put_old */
13161             if (!p || !p2) {
13162                 ret = -TARGET_EFAULT;
13163             } else {
13164                 ret = get_errno(pivot_root(p, p2));
13165             }
13166             unlock_user(p2, arg2, 0);
13167             unlock_user(p, arg1, 0);
13168         }
13169         return ret;
13170 #endif
13171 
13172     default:
13173         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13174         return -TARGET_ENOSYS;
13175     }
13176     return ret;
13177 }
13178 
13179 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13180                     abi_long arg2, abi_long arg3, abi_long arg4,
13181                     abi_long arg5, abi_long arg6, abi_long arg7,
13182                     abi_long arg8)
13183 {
13184     CPUState *cpu = env_cpu(cpu_env);
13185     abi_long ret;
13186 
13187 #ifdef DEBUG_ERESTARTSYS
13188     /* Debug-only code for exercising the syscall-restart code paths
13189      * in the per-architecture cpu main loops: restart every syscall
13190      * the guest makes once before letting it through.
13191      */
13192     {
13193         static bool flag;
13194         flag = !flag;
13195         if (flag) {
13196             return -QEMU_ERESTARTSYS;
13197         }
13198     }
13199 #endif
13200 
13201     record_syscall_start(cpu, num, arg1,
13202                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13203 
13204     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13205         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13206     }
13207 
13208     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13209                       arg5, arg6, arg7, arg8);
13210 
13211     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13212         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13213                           arg3, arg4, arg5, arg6);
13214     }
13215 
13216     record_syscall_return(cpu, num, ret);
13217     return ret;
13218 }
13219