xref: /openbmc/qemu/linux-user/syscall.c (revision 803ca43e)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
/* Fallback for older host kernel headers that don't define CLONE_IO. */
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* The complement of everything we accept for fork: any such bit set
 * in a fork-style clone request makes it unsupportable.
 */
#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

/* Likewise for thread-style clone requests. */
#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

/* Local copies of the VFAT directory ioctls, presumably matching the
 * definitions in <linux/msdos_fs.h> (see commented-out include) so we
 * don't need to pull in that whole header.  TODO confirm against the
 * kernel header if these are ever changed.
 */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
/* Undefine any _syscallN macros picked up from system headers so that we
 * can provide our own definitions below.
 */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

/*
 * _syscallN(type, name, type1, arg1, ...) defines a static function
 * "name" taking N typed arguments, whose body invokes the raw host
 * syscall(2) with number __NR_<name>.  This bypasses any libc wrapper
 * for the call in question.
 */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
                  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
259 
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_statx __NR_statx
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we emulate getdents with getdents if the host has it.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
303 
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address,int *,tidptr)
325 #endif
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
328           const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
332           const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #endif
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 /* sched_attr is not defined in glibc */
341 struct sched_attr {
342     uint32_t size;
343     uint32_t sched_policy;
344     uint64_t sched_flags;
345     int32_t sched_nice;
346     uint32_t sched_priority;
347     uint64_t sched_runtime;
348     uint64_t sched_deadline;
349     uint64_t sched_period;
350     uint32_t sched_util_min;
351     uint32_t sched_util_max;
352 };
353 #define __NR_sys_sched_getattr __NR_sched_getattr
354 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
355           unsigned int, size, unsigned int, flags);
356 #define __NR_sys_sched_setattr __NR_sched_setattr
357 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
358           unsigned int, flags);
359 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
360 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
361 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
362 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
363           const struct sched_param *, param);
364 #define __NR_sys_sched_getparam __NR_sched_getparam
365 _syscall2(int, sys_sched_getparam, pid_t, pid,
366           struct sched_param *, param);
367 #define __NR_sys_sched_setparam __NR_sched_setparam
368 _syscall2(int, sys_sched_setparam, pid_t, pid,
369           const struct sched_param *, param);
370 #define __NR_sys_getcpu __NR_getcpu
371 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
372 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
373           void *, arg);
374 _syscall2(int, capget, struct __user_cap_header_struct *, header,
375           struct __user_cap_data_struct *, data);
376 _syscall2(int, capset, struct __user_cap_header_struct *, header,
377           struct __user_cap_data_struct *, data);
378 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
379 _syscall2(int, ioprio_get, int, which, int, who)
380 #endif
381 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
382 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
383 #endif
384 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
385 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
386 #endif
387 
388 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
389 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
390           unsigned long, idx1, unsigned long, idx2)
391 #endif
392 
393 /*
394  * It is assumed that struct statx is architecture independent.
395  */
396 #if defined(TARGET_NR_statx) && defined(__NR_statx)
397 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
398           unsigned int, mask, struct target_statx *, statxbuf)
399 #endif
400 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
401 _syscall2(int, membarrier, int, cmd, int, flags)
402 #endif
403 
/*
 * Translation table between target and host open(2)/fcntl(2) file status
 * flags.  Each row is { target_mask, target_bits, host_mask, host_bits };
 * the all-zero row terminates the table.  Flags that may be missing on
 * some hosts are guarded so the table still builds there.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
439 
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Fallback for hosts without the utimensat syscall: always fail with
 * ENOSYS, matching the raw-syscall error convention of the stub above.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
456 
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Fallback for hosts without the renameat2 syscall: with flags == 0 the
 * call is equivalent to plain renameat(); non-zero flags cannot be
 * emulated, so fail with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
474 
475 #ifdef CONFIG_INOTIFY
476 #include <sys/inotify.h>
477 #else
478 /* Userspace can usually survive runtime without inotify */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY  */
484 
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1   /* invalid number: the raw syscall will fail */
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;   /* soft limit */
    uint64_t rlim_max;   /* hard limit */
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
499 
500 
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.
 * A slot is free when its entry is 0; a non-zero entry is either the
 * placeholder (timer_t)1 set while the timer is being created, or the
 * real host timer id afterwards.
 */
static timer_t g_posix_timers[32] = { 0, } ;

/*
 * Find a free slot in g_posix_timers[] and atomically reserve it.
 *
 * The compare-and-swap closes the race where two guest threads calling
 * timer_create() simultaneously both observe slot k as free and are
 * handed the same index (the old code's "does this need a lock?" FIXME).
 *
 * Returns the reserved slot index, or -1 if all slots are in use.
 */
static inline int next_free_host_timer(void)
{
    int k;

    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        timer_t expected = 0;

        /* Claim the slot by swinging it 0 -> (timer_t)1; only one
         * thread can win for any given slot.
         */
        if (__atomic_compare_exchange_n(&g_posix_timers[k], &expected,
                                        (timer_t)1, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
            return k;
        }
    }
    return -1;
}
#endif
518 
/*
 * Convert a host errno value to the guest's numbering.  The case list
 * is generated from errnos.c.inc via the E() macro; any errno with no
 * explicit mapping is passed through unchanged.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
529 
/*
 * Convert a guest errno value to the host's numbering: the exact
 * inverse of host_to_target_errno(), generated from the same
 * errnos.c.inc list.  Unknown values pass through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
540 
541 static inline abi_long get_errno(abi_long ret)
542 {
543     if (ret == -1)
544         return -host_to_target_errno(errno);
545     else
546         return ret;
547 }
548 
549 const char *target_strerror(int err)
550 {
551     if (err == QEMU_ERESTARTSYS) {
552         return "To be restarted";
553     }
554     if (err == QEMU_ESIGRETURN) {
555         return "Successful exit from sigreturn";
556     }
557 
558     return strerror(target_to_host_errno(err));
559 }
560 
561 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
562 {
563     int i;
564     uint8_t b;
565     if (usize <= ksize) {
566         return 1;
567     }
568     for (i = ksize; i < usize; i++) {
569         if (get_user_u8(b, addr + i)) {
570             return -TARGET_EFAULT;
571         }
572         if (b != 0) {
573             return 0;
574         }
575     }
576     return 1;
577 }
578 
/*
 * safe_syscallN(type, name, type1, arg1, ...) defines a static function
 * safe_<name>() with N typed arguments whose body enters the host kernel
 * via safe_syscall() (see user/safe-syscall.h) rather than the raw
 * syscall(), so blocking syscalls interact correctly with guest signal
 * handling and restart.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
625 
/*
 * Instantiations of the safe_syscallN() wrappers for every blocking
 * syscall the emulation below needs; each defines a safe_<name>()
 * function.  Host- or target-conditional ones are guarded accordingly.
 */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
744 
745 static inline int host_to_target_sock_type(int host_type)
746 {
747     int target_type;
748 
749     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
750     case SOCK_DGRAM:
751         target_type = TARGET_SOCK_DGRAM;
752         break;
753     case SOCK_STREAM:
754         target_type = TARGET_SOCK_STREAM;
755         break;
756     default:
757         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
758         break;
759     }
760 
761 #if defined(SOCK_CLOEXEC)
762     if (host_type & SOCK_CLOEXEC) {
763         target_type |= TARGET_SOCK_CLOEXEC;
764     }
765 #endif
766 
767 #if defined(SOCK_NONBLOCK)
768     if (host_type & SOCK_NONBLOCK) {
769         target_type |= TARGET_SOCK_NONBLOCK;
770     }
771 #endif
772 
773     return target_type;
774 }
775 
/* Emulated program-break state for do_brk(). */
static abi_ulong target_brk;          /* current guest break */
static abi_ulong target_original_brk; /* initial break; never shrink below it */
static abi_ulong brk_page;            /* page-aligned top of the reserved heap */

/* Record the initial program break (host-page aligned) at program load. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

/* Uncomment the first definition to trace do_brk() decisions. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
788 
/* do_brk() must return target values and target errnos.
 *
 * Emulates the guest brk(2): grows (or logically shrinks) the guest heap
 * to new_brk.  Like the kernel syscall, on any failure it returns the
 * current break unchanged (except on Alpha/OSF/1, see below) rather
 * than an error code.
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the conventional "query the current break" form. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        /* target_mmap() itself failed. */
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
868 
869 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
870     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
871 static inline abi_long copy_from_user_fdset(fd_set *fds,
872                                             abi_ulong target_fds_addr,
873                                             int n)
874 {
875     int i, nw, j, k;
876     abi_ulong b, *target_fds;
877 
878     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
879     if (!(target_fds = lock_user(VERIFY_READ,
880                                  target_fds_addr,
881                                  sizeof(abi_ulong) * nw,
882                                  1)))
883         return -TARGET_EFAULT;
884 
885     FD_ZERO(fds);
886     k = 0;
887     for (i = 0; i < nw; i++) {
888         /* grab the abi_ulong */
889         __get_user(b, &target_fds[i]);
890         for (j = 0; j < TARGET_ABI_BITS; j++) {
891             /* check the bit inside the abi_ulong */
892             if ((b >> j) & 1)
893                 FD_SET(k, fds);
894             k++;
895         }
896     }
897 
898     unlock_user(target_fds, target_fds_addr, 0);
899 
900     return 0;
901 }
902 
903 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
904                                                  abi_ulong target_fds_addr,
905                                                  int n)
906 {
907     if (target_fds_addr) {
908         if (copy_from_user_fdset(fds, target_fds_addr, n))
909             return -TARGET_EFAULT;
910         *fds_ptr = fds;
911     } else {
912         *fds_ptr = NULL;
913     }
914     return 0;
915 }
916 
917 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
918                                           const fd_set *fds,
919                                           int n)
920 {
921     int i, nw, j, k;
922     abi_long v;
923     abi_ulong *target_fds;
924 
925     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
926     if (!(target_fds = lock_user(VERIFY_WRITE,
927                                  target_fds_addr,
928                                  sizeof(abi_ulong) * nw,
929                                  0)))
930         return -TARGET_EFAULT;
931 
932     k = 0;
933     for (i = 0; i < nw; i++) {
934         v = 0;
935         for (j = 0; j < TARGET_ABI_BITS; j++) {
936             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
937             k++;
938         }
939         __put_user(v, &target_fds[i]);
940     }
941 
942     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
943 
944     return 0;
945 }
946 #endif
947 
/* Host clock tick rate used to scale clock_t values; Alpha hosts use
 * 1024 ticks per second, everything else here assumes 100 — TODO
 * confirm this matches the host kernel's USER_HZ in all cases. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/*
 * Convert a clock_t value expressed in host ticks (HOST_HZ) into
 * target ticks (TARGET_HZ).  When the rates match the value passes
 * through unchanged; otherwise it is rescaled using a 64-bit
 * intermediate product to avoid overflow.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
962 
/*
 * Copy a host struct rusage out to guest memory at @target_addr,
 * byte-swapping every field with tswapal().  The tv_sec/tv_usec pairs
 * of ru_utime and ru_stime are converted individually; all remaining
 * counters are plain abi-long fields.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest struct cannot be
 * written.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
992 
#ifdef TARGET_NR_setrlimit
/*
 * Convert a guest rlimit value (guest byte order) into a host rlim_t.
 * The guest's TARGET_RLIM_INFINITY encoding maps to the host's
 * RLIM_INFINITY, as does any value that does not survive a round-trip
 * through the host rlim_t type.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped = tswapal(target_rlim);
    rlim_t host_rlim = swapped;

    if (swapped == TARGET_RLIM_INFINITY || swapped != (rlim_t)host_rlim) {
        return RLIM_INFINITY;
    }
    return host_rlim;
}
#endif
1010 
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t into the guest encoding (guest byte order).
 * RLIM_INFINITY, and any value too large to represent as a guest
 * abi_long, becomes TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        target_rlim_swap = TARGET_RLIM_INFINITY;
    } else {
        target_rlim_swap = rlim;
    }
    return tswapal(target_rlim_swap);
}
#endif
1026 
/*
 * Map a guest RLIMIT_* resource code to the host's numbering.
 * RLIMIT_RTTIME is translated only on hosts that define it.  Unknown
 * codes are passed through unchanged, leaving it to the host
 * setrlimit/getrlimit call to reject them.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        /* Unknown resource: let the host syscall report EINVAL. */
        return code;
    }
}
1068 
1069 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1070                                               abi_ulong target_tv_addr)
1071 {
1072     struct target_timeval *target_tv;
1073 
1074     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1075         return -TARGET_EFAULT;
1076     }
1077 
1078     __get_user(tv->tv_sec, &target_tv->tv_sec);
1079     __get_user(tv->tv_usec, &target_tv->tv_usec);
1080 
1081     unlock_user_struct(target_tv, target_tv_addr, 0);
1082 
1083     return 0;
1084 }
1085 
1086 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1087                                             const struct timeval *tv)
1088 {
1089     struct target_timeval *target_tv;
1090 
1091     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1092         return -TARGET_EFAULT;
1093     }
1094 
1095     __put_user(tv->tv_sec, &target_tv->tv_sec);
1096     __put_user(tv->tv_usec, &target_tv->tv_usec);
1097 
1098     unlock_user_struct(target_tv, target_tv_addr, 1);
1099 
1100     return 0;
1101 }
1102 
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest struct timeval in the 64-bit sock_timeval layout into
 * the host representation.  Returns 0 on success, -TARGET_EFAULT on
 * a bad guest address.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *ttv;

    if (!lock_user_struct(VERIFY_READ, ttv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tv->tv_sec, &ttv->tv_sec);
    __get_user(tv->tv_usec, &ttv->tv_usec);
    unlock_user_struct(ttv, target_tv_addr, 0);
    return 0;
}
#endif
1121 
1122 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1123                                               const struct timeval *tv)
1124 {
1125     struct target__kernel_sock_timeval *target_tv;
1126 
1127     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1128         return -TARGET_EFAULT;
1129     }
1130 
1131     __put_user(tv->tv_sec, &target_tv->tv_sec);
1132     __put_user(tv->tv_usec, &target_tv->tv_usec);
1133 
1134     unlock_user_struct(target_tv, target_tv_addr, 1);
1135 
1136     return 0;
1137 }
1138 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a guest struct timespec (old abi-long layout) into the host
 * representation.  Returns 0 on success, -TARGET_EFAULT on a bad
 * guest address.
 *
 * The guard above previously tested TARGET_NR_pselect6 twice; the
 * second test must be TARGET_NR_pselect6_time64, because do_pselect6()
 * (compiled when either syscall is defined) calls this helper on its
 * non-time64 path.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1162 
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a guest struct __kernel_timespec (64-bit time_t layout) into
 * the host representation.  Returns 0 on success, -TARGET_EFAULT on
 * a bad guest address.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding: the kernel layout keeps
     * tv_nsec as a 64-bit slot, but only the low abi_long is
     * significant, so truncate and sign-extend through abi_long. */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1190 
1191 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1192                                                struct timespec *host_ts)
1193 {
1194     struct target_timespec *target_ts;
1195 
1196     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1197         return -TARGET_EFAULT;
1198     }
1199     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1200     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1201     unlock_user_struct(target_ts, target_addr, 1);
1202     return 0;
1203 }
1204 
1205 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1206                                                  struct timespec *host_ts)
1207 {
1208     struct target__kernel_timespec *target_ts;
1209 
1210     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1211         return -TARGET_EFAULT;
1212     }
1213     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1214     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1215     unlock_user_struct(target_ts, target_addr, 1);
1216     return 0;
1217 }
1218 
#if defined(TARGET_NR_gettimeofday)
/*
 * Write a host struct timezone out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *ttz;

    /* NOTE(review): the copy-in flag of 1 looks unnecessary for a
     * pure write (both fields are overwritten below), but it is kept
     * to preserve the existing behaviour exactly. */
    if (!lock_user_struct(VERIFY_WRITE, ttz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __put_user(tz->tz_minuteswest, &ttz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &ttz->tz_dsttime);
    unlock_user_struct(ttz, target_tz_addr, 1);
    return 0;
}
#endif
1237 
#if defined(TARGET_NR_settimeofday)
/*
 * Read a guest struct timezone into the host representation.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *ttz;

    if (!lock_user_struct(VERIFY_READ, ttz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tz->tz_minuteswest, &ttz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &ttz->tz_dsttime);
    unlock_user_struct(ttz, target_tz_addr, 0);
    return 0;
}
#endif
1256 
1257 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1258 #include <mqueue.h>
1259 
1260 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1261                                               abi_ulong target_mq_attr_addr)
1262 {
1263     struct target_mq_attr *target_mq_attr;
1264 
1265     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1266                           target_mq_attr_addr, 1))
1267         return -TARGET_EFAULT;
1268 
1269     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1270     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1271     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1272     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1273 
1274     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1275 
1276     return 0;
1277 }
1278 
1279 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1280                                             const struct mq_attr *attr)
1281 {
1282     struct target_mq_attr *target_mq_attr;
1283 
1284     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1285                           target_mq_attr_addr, 0))
1286         return -TARGET_EFAULT;
1287 
1288     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1289     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1290     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1291     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1292 
1293     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1294 
1295     return 0;
1296 }
1297 #endif
1298 
1299 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1300 /* do_select() must return target values and target errnos. */
/*
 * Emulate select(2): @n is the highest fd plus one, the three addr
 * arguments point to guest fd_sets (any may be 0/NULL), and
 * @target_tv_addr optionally points to a guest struct timeval.
 * The host call is actually pselect6 with the timeout converted to a
 * timespec; on success the fd_sets and the remaining timeout are
 * written back to the guest, matching Linux select() semantics.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Copy in the three guest fd_sets; a zero guest address leaves
     * the corresponding pointer NULL. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select() takes a timeval, but the host pselect6 wants a
     * timespec, so convert usec -> nsec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy back the (kernel-modified) fd_sets and remaining
         * timeout to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1356 
1357 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1358 static abi_long do_old_select(abi_ulong arg1)
1359 {
1360     struct target_sel_arg_struct *sel;
1361     abi_ulong inp, outp, exp, tvp;
1362     long nsel;
1363 
1364     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1365         return -TARGET_EFAULT;
1366     }
1367 
1368     nsel = tswapal(sel->n);
1369     inp = tswapal(sel->inp);
1370     outp = tswapal(sel->outp);
1371     exp = tswapal(sel->exp);
1372     tvp = tswapal(sel->tvp);
1373 
1374     unlock_user_struct(sel, arg1, 0);
1375 
1376     return do_select(nsel, inp, outp, exp, tvp);
1377 }
1378 #endif
1379 #endif
1380 
1381 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Emulate pselect6(2) / pselect6_time64(2).  @time64 selects whether
 * the guest timespec at arg5 uses the 64-bit __kernel_timespec layout
 * or the old abi-long layout.  arg6, when non-zero, points to the
 * kernel's packed {sigset pointer, sigset size} pair.  On success the
 * fd_sets and remaining timeout are copied back to the guest.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* Copy in the three guest fd_sets; a zero guest address leaves
     * the corresponding pointer NULL. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
            ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            /* Temporarily install the guest's signal mask; it is
             * restored by finish_sigsuspend_mask() below. */
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        /* Copy the result fd_sets and remaining timeout back to the
         * guest, using whichever timespec layout was requested. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
1491 #endif
1492 
1493 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1494     defined(TARGET_NR_ppoll_time64)
/*
 * Emulate poll(2), ppoll(2) and ppoll_time64(2), selected by the
 * @ppoll and @time64 flags.  arg1/arg2 are the guest pollfd array and
 * its length; for ppoll, arg3 is an optional guest timespec and
 * arg4/arg5 the optional guest sigset and its size; for plain poll,
 * arg3 is the millisecond timeout.  revents are copied back to the
 * guest pollfd array on success.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* Reject counts whose byte size would overflow int. */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        /* Build a host pollfd array, byte-swapping fd and events. */
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        /* Convert the optional guest timeout, in whichever timespec
         * layout was requested. */
        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Temporarily install the guest's signal mask; restored
             * by finish_sigsuspend_mask() after the host call. */
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        /* NOTE(review): these EFAULT returns skip the final
         * unlock_user() of target_pfd below — confirm whether that
         * leak matters for the lock_user implementation in use. */
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
          struct timespec ts, *pts;

          if (arg3 >= 0) {
              /* Convert ms to secs, ns */
              ts.tv_sec = arg3 / 1000;
              ts.tv_nsec = (arg3 % 1000) * 1000000LL;
              pts = &ts;
          } else {
              /* -ve poll() timeout means "infinite" */
              pts = NULL;
          }
          ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        /* Copy the kernel-reported revents back to the guest. */
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
1590 #endif
1591 
/*
 * Thin wrapper around the host pipe2(); returns -ENOSYS when the
 * build host's libc does not provide pipe2 (CONFIG_PIPE2 unset).
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1600 
/*
 * Emulate pipe(2)/pipe2(2).  @pipedes is the guest address of the
 * two-int result array; @flags selects pipe2 behaviour; @is_pipe2
 * distinguishes which guest syscall was invoked, because several
 * targets return the original pipe()'s fds in registers rather than
 * through memory.
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd goes in a4; first fd is the return value. */
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        /* Second fd goes in v1 (gpr[3]). */
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        /* Second fd goes in r1. */
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        /* Second fd goes in %o1. */
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default path: write both fds to the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1634 
1635 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1636                                               abi_ulong target_addr,
1637                                               socklen_t len)
1638 {
1639     struct target_ip_mreqn *target_smreqn;
1640 
1641     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1642     if (!target_smreqn)
1643         return -TARGET_EFAULT;
1644     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1645     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1646     if (len == sizeof(struct target_ip_mreqn))
1647         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1648     unlock_user(target_smreqn, target_addr, 0);
1649 
1650     return 0;
1651 }
1652 
/*
 * Convert a guest sockaddr at @target_addr (length @len) into the
 * host buffer @addr.  Handles fd-specific translators, AF_UNIX
 * sun_path termination fix-up, and byte-swapping of AF_NETLINK /
 * AF_PACKET fields.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types (registered via fd_trans) carry their own
     * address translator; defer to it entirely. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* NOTE(review): cp[len] reads one byte past the length
             * the guest supplied (still inside the locked region
             * only if lock_user rounded up) — confirm this is safe
             * for the lock_user implementation in use. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1709 
/*
 * Copy a host sockaddr back to guest memory at @target_addr,
 * byte-swapping the family field and the family-specific fields of
 * AF_NETLINK, AF_PACKET and AF_INET6 addresses.  A zero @len is a
 * no-op.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the buffer is long enough to contain it
     * (the kernel may return truncated addresses). */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): this casts the target buffer to the *host*
         * struct sockaddr_ll, unlike the netlink/in6 branches which
         * use target_* layouts — confirm the layouts match on all
         * supported targets. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1749 
/*
 * Convert the ancillary data (control messages, e.g. SCM_RIGHTS fds or
 * SCM_CREDENTIALS) attached to a guest msghdr into the host-format
 * buffer already installed in msgh->msg_control.  On return,
 * msgh->msg_controllen is set to the number of host bytes produced.
 * Returns 0 on success, or -TARGET_EFAULT if the guest control buffer
 * cannot be locked.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;    /* host control bytes emitted so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* No room for even one guest cmsg header: nothing to convert */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the guest and host cmsg chains in lockstep */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest cmsg, header excluded */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* SOL_SOCKET's numeric value differs per target, so map it;
         * other levels pass through with only a byte swap. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptor passing: swap each int in the array */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Credential passing: convert pid/uid/gid field by field */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload type: log and copy the bytes through
             * without any conversion. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1835 
/*
 * Convert host ancillary data (control messages) in msgh into guest
 * format, writing into the guest buffer described by target_msgh.
 * Handles SCM_RIGHTS/SO_TIMESTAMP/SCM_CREDENTIALS plus the IP_TTL,
 * IP_RECVERR, IPV6_HOPLIMIT and IPV6_RECVERR payloads explicitly;
 * anything else is copied through raw with a LOG_UNIMP message.
 * Truncation due to a too-small guest buffer is reported via the
 * MSG_CTRUNC flag, matching kernel put_cmsg() behaviour.
 * On return target_msgh->msg_controllen is the guest byte count used.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer is unwritable.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;    /* guest control bytes emitted so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Guest buffer too small for even one header: report zero bytes */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the host and guest cmsg chains in lockstep */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length, header excluded */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* SOL_SOCKET's numeric value differs per target */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* struct timeval differs in size between host/target */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Clamp to the guest space remaining and flag the truncation */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* File descriptors: swap however many ints fit */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* A truncated timeval cannot be converted sanely */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error: sock_extended_err followed by the
                 * offending peer's address, as the kernel lays it out */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                /* target_errh lives in already-locked guest memory, so
                 * we can pass its address straight through here */
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 analogue of the IP_RECVERR layout above */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: copy through raw, zero-padding if the
             * guest payload area is larger than the host one */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for what we actually wrote into the guest buffer */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2067 
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulates setsockopt(2) for the guest: converts the guest option value
 * at optval_addr into host format (per level/optname), issues the host
 * setsockopt() call, and maps the result back.  Options not explicitly
 * handled fall through to 'unimplemented' and return -TARGET_ENOPROTOOPT.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        /* Options taking an int, which the guest may pass as either a
         * u32 or (historical BSD compatibility) a single byte. */
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept either the short ip_mreq or the longer ip_mreqn form */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            /* optlen is bounded above, so a stack allocation is safe */
            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            /* ip_mreq_source contains only in_addr fields, which stay in
             * network byte order, so the guest bytes pass through as-is */
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        /* Plain-int IPv6 options */
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* ipi6_ifindex is host-endian; the address is left alone */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index needs byte swapping */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            /* The kernel accepts short filters; just cap at our size */
            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* The filter is an array of eight 32-bit bitmap words */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take a u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            /* Key material is an opaque byte string: copy verbatim */
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* The auth size is carried in optlen itself; no value buffer */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

/* Shared tail for the two timeout options; optname selects which */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                /* Classic BPF socket filter: rebuild the program with
                 * host-endian instruction fields before attaching */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    /* jt/jf are single bytes: no swap needed */
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
	case TARGET_SO_BINDTODEVICE:
	{
		char *dev_ifname, *addr_ifname;

		/* Silently truncate over-long names, like the kernel does
		 * with the terminating NUL */
		if (optlen > IFNAMSIZ - 1) {
		    optlen = IFNAMSIZ - 1;
		}
		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
		if (!dev_ifname) {
		    return -TARGET_EFAULT;
		}
		optname = SO_BINDTODEVICE;
		addr_ifname = alloca(IFNAMSIZ);
		memcpy(addr_ifname, dev_ifname, optlen);
		addr_ifname[optlen] = 0;
		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
		unlock_user (dev_ifname, optval_addr, 0);
		return ret;
	}
        case TARGET_SO_LINGER:
        {
                struct linger lg;
                struct target_linger *tlg;

                if (optlen != sizeof(struct target_linger)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                    return -TARGET_EFAULT;
                }
                __get_user(lg.l_onoff, &tlg->l_onoff);
                __get_user(lg.l_linger, &tlg->l_linger);
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                &lg, sizeof(lg)));
                unlock_user_struct(tlg, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  These only map the target
             * option number to the host one, then fall through to the
             * shared u32 fetch + setsockopt below the switch. */
        case TARGET_SO_DEBUG:
		optname = SO_DEBUG;
		break;
        case TARGET_SO_REUSEADDR:
		optname = SO_REUSEADDR;
		break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
                optname = SO_REUSEPORT;
                break;
#endif
        case TARGET_SO_TYPE:
		optname = SO_TYPE;
		break;
        case TARGET_SO_ERROR:
		optname = SO_ERROR;
		break;
        case TARGET_SO_DONTROUTE:
		optname = SO_DONTROUTE;
		break;
        case TARGET_SO_BROADCAST:
		optname = SO_BROADCAST;
		break;
        case TARGET_SO_SNDBUF:
		optname = SO_SNDBUF;
		break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
		optname = SO_RCVBUF;
		break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
		optname = SO_KEEPALIVE;
		break;
        case TARGET_SO_OOBINLINE:
		optname = SO_OOBINLINE;
		break;
        case TARGET_SO_NO_CHECK:
		optname = SO_NO_CHECK;
		break;
        case TARGET_SO_PRIORITY:
		optname = SO_PRIORITY;
		break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
		optname = SO_BSDCOMPAT;
		break;
#endif
        case TARGET_SO_PASSCRED:
		optname = SO_PASSCRED;
		break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
		optname = SO_TIMESTAMP;
		break;
        case TARGET_SO_RCVLOWAT:
		optname = SO_RCVLOWAT;
		break;
        default:
            goto unimplemented;
        }
	if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

	if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        /* All handled netlink options take a plain int; the switch just
         * whitelists the option numbers we know are safe to pass on */
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2530 
2531 /* do_getsockopt() Must return target values and target errnos. */
2532 static abi_long do_getsockopt(int sockfd, int level, int optname,
2533                               abi_ulong optval_addr, abi_ulong optlen)
2534 {
2535     abi_long ret;
2536     int len, val;
2537     socklen_t lv;
2538 
2539     switch(level) {
2540     case TARGET_SOL_SOCKET:
2541         level = SOL_SOCKET;
2542         switch (optname) {
2543         /* These don't just return a single integer */
2544         case TARGET_SO_PEERNAME:
2545             goto unimplemented;
2546         case TARGET_SO_RCVTIMEO: {
2547             struct timeval tv;
2548             socklen_t tvlen;
2549 
2550             optname = SO_RCVTIMEO;
2551 
2552 get_timeout:
2553             if (get_user_u32(len, optlen)) {
2554                 return -TARGET_EFAULT;
2555             }
2556             if (len < 0) {
2557                 return -TARGET_EINVAL;
2558             }
2559 
2560             tvlen = sizeof(tv);
2561             ret = get_errno(getsockopt(sockfd, level, optname,
2562                                        &tv, &tvlen));
2563             if (ret < 0) {
2564                 return ret;
2565             }
2566             if (len > sizeof(struct target_timeval)) {
2567                 len = sizeof(struct target_timeval);
2568             }
2569             if (copy_to_user_timeval(optval_addr, &tv)) {
2570                 return -TARGET_EFAULT;
2571             }
2572             if (put_user_u32(len, optlen)) {
2573                 return -TARGET_EFAULT;
2574             }
2575             break;
2576         }
2577         case TARGET_SO_SNDTIMEO:
2578             optname = SO_SNDTIMEO;
2579             goto get_timeout;
2580         case TARGET_SO_PEERCRED: {
2581             struct ucred cr;
2582             socklen_t crlen;
2583             struct target_ucred *tcr;
2584 
2585             if (get_user_u32(len, optlen)) {
2586                 return -TARGET_EFAULT;
2587             }
2588             if (len < 0) {
2589                 return -TARGET_EINVAL;
2590             }
2591 
2592             crlen = sizeof(cr);
2593             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2594                                        &cr, &crlen));
2595             if (ret < 0) {
2596                 return ret;
2597             }
2598             if (len > crlen) {
2599                 len = crlen;
2600             }
2601             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2602                 return -TARGET_EFAULT;
2603             }
2604             __put_user(cr.pid, &tcr->pid);
2605             __put_user(cr.uid, &tcr->uid);
2606             __put_user(cr.gid, &tcr->gid);
2607             unlock_user_struct(tcr, optval_addr, 1);
2608             if (put_user_u32(len, optlen)) {
2609                 return -TARGET_EFAULT;
2610             }
2611             break;
2612         }
2613         case TARGET_SO_PEERSEC: {
2614             char *name;
2615 
2616             if (get_user_u32(len, optlen)) {
2617                 return -TARGET_EFAULT;
2618             }
2619             if (len < 0) {
2620                 return -TARGET_EINVAL;
2621             }
2622             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2623             if (!name) {
2624                 return -TARGET_EFAULT;
2625             }
2626             lv = len;
2627             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2628                                        name, &lv));
2629             if (put_user_u32(lv, optlen)) {
2630                 ret = -TARGET_EFAULT;
2631             }
2632             unlock_user(name, optval_addr, lv);
2633             break;
2634         }
2635         case TARGET_SO_LINGER:
2636         {
2637             struct linger lg;
2638             socklen_t lglen;
2639             struct target_linger *tlg;
2640 
2641             if (get_user_u32(len, optlen)) {
2642                 return -TARGET_EFAULT;
2643             }
2644             if (len < 0) {
2645                 return -TARGET_EINVAL;
2646             }
2647 
2648             lglen = sizeof(lg);
2649             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2650                                        &lg, &lglen));
2651             if (ret < 0) {
2652                 return ret;
2653             }
2654             if (len > lglen) {
2655                 len = lglen;
2656             }
2657             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2658                 return -TARGET_EFAULT;
2659             }
2660             __put_user(lg.l_onoff, &tlg->l_onoff);
2661             __put_user(lg.l_linger, &tlg->l_linger);
2662             unlock_user_struct(tlg, optval_addr, 1);
2663             if (put_user_u32(len, optlen)) {
2664                 return -TARGET_EFAULT;
2665             }
2666             break;
2667         }
2668         /* Options with 'int' argument.  */
2669         case TARGET_SO_DEBUG:
2670             optname = SO_DEBUG;
2671             goto int_case;
2672         case TARGET_SO_REUSEADDR:
2673             optname = SO_REUSEADDR;
2674             goto int_case;
2675 #ifdef SO_REUSEPORT
2676         case TARGET_SO_REUSEPORT:
2677             optname = SO_REUSEPORT;
2678             goto int_case;
2679 #endif
2680         case TARGET_SO_TYPE:
2681             optname = SO_TYPE;
2682             goto int_case;
2683         case TARGET_SO_ERROR:
2684             optname = SO_ERROR;
2685             goto int_case;
2686         case TARGET_SO_DONTROUTE:
2687             optname = SO_DONTROUTE;
2688             goto int_case;
2689         case TARGET_SO_BROADCAST:
2690             optname = SO_BROADCAST;
2691             goto int_case;
2692         case TARGET_SO_SNDBUF:
2693             optname = SO_SNDBUF;
2694             goto int_case;
2695         case TARGET_SO_RCVBUF:
2696             optname = SO_RCVBUF;
2697             goto int_case;
2698         case TARGET_SO_KEEPALIVE:
2699             optname = SO_KEEPALIVE;
2700             goto int_case;
2701         case TARGET_SO_OOBINLINE:
2702             optname = SO_OOBINLINE;
2703             goto int_case;
2704         case TARGET_SO_NO_CHECK:
2705             optname = SO_NO_CHECK;
2706             goto int_case;
2707         case TARGET_SO_PRIORITY:
2708             optname = SO_PRIORITY;
2709             goto int_case;
2710 #ifdef SO_BSDCOMPAT
2711         case TARGET_SO_BSDCOMPAT:
2712             optname = SO_BSDCOMPAT;
2713             goto int_case;
2714 #endif
2715         case TARGET_SO_PASSCRED:
2716             optname = SO_PASSCRED;
2717             goto int_case;
2718         case TARGET_SO_TIMESTAMP:
2719             optname = SO_TIMESTAMP;
2720             goto int_case;
2721         case TARGET_SO_RCVLOWAT:
2722             optname = SO_RCVLOWAT;
2723             goto int_case;
2724         case TARGET_SO_ACCEPTCONN:
2725             optname = SO_ACCEPTCONN;
2726             goto int_case;
2727         case TARGET_SO_PROTOCOL:
2728             optname = SO_PROTOCOL;
2729             goto int_case;
2730         case TARGET_SO_DOMAIN:
2731             optname = SO_DOMAIN;
2732             goto int_case;
2733         default:
2734             goto int_case;
2735         }
2736         break;
2737     case SOL_TCP:
2738     case SOL_UDP:
2739         /* TCP and UDP options all take an 'int' value.  */
2740     int_case:
2741         if (get_user_u32(len, optlen))
2742             return -TARGET_EFAULT;
2743         if (len < 0)
2744             return -TARGET_EINVAL;
2745         lv = sizeof(lv);
2746         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2747         if (ret < 0)
2748             return ret;
2749         if (optname == SO_TYPE) {
2750             val = host_to_target_sock_type(val);
2751         }
2752         if (len > lv)
2753             len = lv;
2754         if (len == 4) {
2755             if (put_user_u32(val, optval_addr))
2756                 return -TARGET_EFAULT;
2757         } else {
2758             if (put_user_u8(val, optval_addr))
2759                 return -TARGET_EFAULT;
2760         }
2761         if (put_user_u32(len, optlen))
2762             return -TARGET_EFAULT;
2763         break;
2764     case SOL_IP:
2765         switch(optname) {
2766         case IP_TOS:
2767         case IP_TTL:
2768         case IP_HDRINCL:
2769         case IP_ROUTER_ALERT:
2770         case IP_RECVOPTS:
2771         case IP_RETOPTS:
2772         case IP_PKTINFO:
2773         case IP_MTU_DISCOVER:
2774         case IP_RECVERR:
2775         case IP_RECVTOS:
2776 #ifdef IP_FREEBIND
2777         case IP_FREEBIND:
2778 #endif
2779         case IP_MULTICAST_TTL:
2780         case IP_MULTICAST_LOOP:
2781             if (get_user_u32(len, optlen))
2782                 return -TARGET_EFAULT;
2783             if (len < 0)
2784                 return -TARGET_EINVAL;
2785             lv = sizeof(lv);
2786             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2787             if (ret < 0)
2788                 return ret;
2789             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2790                 len = 1;
2791                 if (put_user_u32(len, optlen)
2792                     || put_user_u8(val, optval_addr))
2793                     return -TARGET_EFAULT;
2794             } else {
2795                 if (len > sizeof(int))
2796                     len = sizeof(int);
2797                 if (put_user_u32(len, optlen)
2798                     || put_user_u32(val, optval_addr))
2799                     return -TARGET_EFAULT;
2800             }
2801             break;
2802         default:
2803             ret = -TARGET_ENOPROTOOPT;
2804             break;
2805         }
2806         break;
2807     case SOL_IPV6:
2808         switch (optname) {
2809         case IPV6_MTU_DISCOVER:
2810         case IPV6_MTU:
2811         case IPV6_V6ONLY:
2812         case IPV6_RECVPKTINFO:
2813         case IPV6_UNICAST_HOPS:
2814         case IPV6_MULTICAST_HOPS:
2815         case IPV6_MULTICAST_LOOP:
2816         case IPV6_RECVERR:
2817         case IPV6_RECVHOPLIMIT:
2818         case IPV6_2292HOPLIMIT:
2819         case IPV6_CHECKSUM:
2820         case IPV6_ADDRFORM:
2821         case IPV6_2292PKTINFO:
2822         case IPV6_RECVTCLASS:
2823         case IPV6_RECVRTHDR:
2824         case IPV6_2292RTHDR:
2825         case IPV6_RECVHOPOPTS:
2826         case IPV6_2292HOPOPTS:
2827         case IPV6_RECVDSTOPTS:
2828         case IPV6_2292DSTOPTS:
2829         case IPV6_TCLASS:
2830         case IPV6_ADDR_PREFERENCES:
2831 #ifdef IPV6_RECVPATHMTU
2832         case IPV6_RECVPATHMTU:
2833 #endif
2834 #ifdef IPV6_TRANSPARENT
2835         case IPV6_TRANSPARENT:
2836 #endif
2837 #ifdef IPV6_FREEBIND
2838         case IPV6_FREEBIND:
2839 #endif
2840 #ifdef IPV6_RECVORIGDSTADDR
2841         case IPV6_RECVORIGDSTADDR:
2842 #endif
2843             if (get_user_u32(len, optlen))
2844                 return -TARGET_EFAULT;
2845             if (len < 0)
2846                 return -TARGET_EINVAL;
2847             lv = sizeof(lv);
2848             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2849             if (ret < 0)
2850                 return ret;
2851             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2852                 len = 1;
2853                 if (put_user_u32(len, optlen)
2854                     || put_user_u8(val, optval_addr))
2855                     return -TARGET_EFAULT;
2856             } else {
2857                 if (len > sizeof(int))
2858                     len = sizeof(int);
2859                 if (put_user_u32(len, optlen)
2860                     || put_user_u32(val, optval_addr))
2861                     return -TARGET_EFAULT;
2862             }
2863             break;
2864         default:
2865             ret = -TARGET_ENOPROTOOPT;
2866             break;
2867         }
2868         break;
2869 #ifdef SOL_NETLINK
2870     case SOL_NETLINK:
2871         switch (optname) {
2872         case NETLINK_PKTINFO:
2873         case NETLINK_BROADCAST_ERROR:
2874         case NETLINK_NO_ENOBUFS:
2875 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2876         case NETLINK_LISTEN_ALL_NSID:
2877         case NETLINK_CAP_ACK:
2878 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2879 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2880         case NETLINK_EXT_ACK:
2881 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2882 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2883         case NETLINK_GET_STRICT_CHK:
2884 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2885             if (get_user_u32(len, optlen)) {
2886                 return -TARGET_EFAULT;
2887             }
2888             if (len != sizeof(val)) {
2889                 return -TARGET_EINVAL;
2890             }
2891             lv = len;
2892             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2893             if (ret < 0) {
2894                 return ret;
2895             }
2896             if (put_user_u32(lv, optlen)
2897                 || put_user_u32(val, optval_addr)) {
2898                 return -TARGET_EFAULT;
2899             }
2900             break;
2901 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2902         case NETLINK_LIST_MEMBERSHIPS:
2903         {
2904             uint32_t *results;
2905             int i;
2906             if (get_user_u32(len, optlen)) {
2907                 return -TARGET_EFAULT;
2908             }
2909             if (len < 0) {
2910                 return -TARGET_EINVAL;
2911             }
2912             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2913             if (!results && len > 0) {
2914                 return -TARGET_EFAULT;
2915             }
2916             lv = len;
2917             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2918             if (ret < 0) {
2919                 unlock_user(results, optval_addr, 0);
2920                 return ret;
2921             }
2922             /* swap host endianess to target endianess. */
2923             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2924                 results[i] = tswap32(results[i]);
2925             }
2926             if (put_user_u32(lv, optlen)) {
2927                 return -TARGET_EFAULT;
2928             }
2929             unlock_user(results, optval_addr, 0);
2930             break;
2931         }
2932 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2933         default:
2934             goto unimplemented;
2935         }
2936         break;
2937 #endif /* SOL_NETLINK */
2938     default:
2939     unimplemented:
2940         qemu_log_mask(LOG_UNIMP,
2941                       "getsockopt level=%d optname=%d not yet supported\n",
2942                       level, optname);
2943         ret = -TARGET_EOPNOTSUPP;
2944         break;
2945     }
2946     return ret;
2947 }
2948 
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* Each shift is split into two half-width steps: a single shift by
     * TARGET_LONG_BITS (or HOST_LONG_BITS) would be undefined behaviour
     * when that equals the width of the promoted type (64-bit ABIs),
     * whereas two half-width shifts are well-defined and yield 0 —
     * exactly what we want, since the low word then already holds the
     * whole offset.
     */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
2965 
/* Translate a guest iovec array at target_addr into a host iovec array
 * whose buffers are locked into host memory.  On success returns a
 * g_malloc'ed array of 'count' entries that must be released with
 * unlock_iovec(); on failure returns NULL with errno set (host errno,
 * not target).  'type' is VERIFY_READ or VERIFY_WRITE and 'copy'
 * controls whether buffer contents are copied in when locking.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        /* Valid empty vector: NULL result but errno 0 lets the caller
         * distinguish this from a failure. */
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                /* Clamp so the running total never exceeds max_len. */
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked so far (entries with a
     * positive guest length are exactly those we locked above). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3053 
3054 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3055                          abi_ulong count, int copy)
3056 {
3057     struct target_iovec *target_vec;
3058     int i;
3059 
3060     target_vec = lock_user(VERIFY_READ, target_addr,
3061                            count * sizeof(struct target_iovec), 1);
3062     if (target_vec) {
3063         for (i = 0; i < count; i++) {
3064             abi_ulong base = tswapal(target_vec[i].iov_base);
3065             abi_long len = tswapal(target_vec[i].iov_len);
3066             if (len < 0) {
3067                 break;
3068             }
3069             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3070         }
3071         unlock_user(target_vec, target_addr, 0);
3072     }
3073 
3074     g_free(vec);
3075 }
3076 
3077 static inline int target_to_host_sock_type(int *type)
3078 {
3079     int host_type = 0;
3080     int target_type = *type;
3081 
3082     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3083     case TARGET_SOCK_DGRAM:
3084         host_type = SOCK_DGRAM;
3085         break;
3086     case TARGET_SOCK_STREAM:
3087         host_type = SOCK_STREAM;
3088         break;
3089     default:
3090         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3091         break;
3092     }
3093     if (target_type & TARGET_SOCK_CLOEXEC) {
3094 #if defined(SOCK_CLOEXEC)
3095         host_type |= SOCK_CLOEXEC;
3096 #else
3097         return -TARGET_EINVAL;
3098 #endif
3099     }
3100     if (target_type & TARGET_SOCK_NONBLOCK) {
3101 #if defined(SOCK_NONBLOCK)
3102         host_type |= SOCK_NONBLOCK;
3103 #elif !defined(O_NONBLOCK)
3104         return -TARGET_EINVAL;
3105 #endif
3106     }
3107     *type = host_type;
3108     return 0;
3109 }
3110 
3111 /* Try to emulate socket type flags after socket creation.  */
3112 static int sock_flags_fixup(int fd, int target_type)
3113 {
3114 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3115     if (target_type & TARGET_SOCK_NONBLOCK) {
3116         int flags = fcntl(fd, F_GETFL);
3117         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3118             close(fd);
3119             return -TARGET_EINVAL;
3120         }
3121     }
3122 #endif
3123     return fd;
3124 }
3125 
3126 /* do_socket() Must return target values and target errnos. */
3127 static abi_long do_socket(int domain, int type, int protocol)
3128 {
3129     int target_type = type;
3130     int ret;
3131 
3132     ret = target_to_host_sock_type(&type);
3133     if (ret) {
3134         return ret;
3135     }
3136 
3137     if (domain == PF_NETLINK && !(
3138 #ifdef CONFIG_RTNETLINK
3139          protocol == NETLINK_ROUTE ||
3140 #endif
3141          protocol == NETLINK_KOBJECT_UEVENT ||
3142          protocol == NETLINK_AUDIT)) {
3143         return -TARGET_EPROTONOSUPPORT;
3144     }
3145 
3146     if (domain == AF_PACKET ||
3147         (domain == AF_INET && type == SOCK_PACKET)) {
3148         protocol = tswap16(protocol);
3149     }
3150 
3151     ret = get_errno(socket(domain, type, protocol));
3152     if (ret >= 0) {
3153         ret = sock_flags_fixup(ret, target_type);
3154         if (type == SOCK_PACKET) {
3155             /* Manage an obsolete case :
3156              * if socket type is SOCK_PACKET, bind by name
3157              */
3158             fd_trans_register(ret, &target_packet_trans);
3159         } else if (domain == PF_NETLINK) {
3160             switch (protocol) {
3161 #ifdef CONFIG_RTNETLINK
3162             case NETLINK_ROUTE:
3163                 fd_trans_register(ret, &target_netlink_route_trans);
3164                 break;
3165 #endif
3166             case NETLINK_KOBJECT_UEVENT:
3167                 /* nothing to do: messages are strings */
3168                 break;
3169             case NETLINK_AUDIT:
3170                 fd_trans_register(ret, &target_netlink_audit_trans);
3171                 break;
3172             default:
3173                 g_assert_not_reached();
3174             }
3175         }
3176     }
3177     return ret;
3178 }
3179 
3180 /* do_bind() Must return target values and target errnos. */
3181 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3182                         socklen_t addrlen)
3183 {
3184     void *addr;
3185     abi_long ret;
3186 
3187     if ((int)addrlen < 0) {
3188         return -TARGET_EINVAL;
3189     }
3190 
3191     addr = alloca(addrlen+1);
3192 
3193     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3194     if (ret)
3195         return ret;
3196 
3197     return get_errno(bind(sockfd, addr, addrlen));
3198 }
3199 
3200 /* do_connect() Must return target values and target errnos. */
3201 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3202                            socklen_t addrlen)
3203 {
3204     void *addr;
3205     abi_long ret;
3206 
3207     if ((int)addrlen < 0) {
3208         return -TARGET_EINVAL;
3209     }
3210 
3211     addr = alloca(addrlen+1);
3212 
3213     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3214     if (ret)
3215         return ret;
3216 
3217     return get_errno(safe_connect(sockfd, addr, addrlen));
3218 }
3219 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Common implementation for sendmsg/recvmsg with the guest msghdr
 * already locked in host memory.  'send' selects sendmsg (non-zero) or
 * recvmsg (zero) semantics.  Returns the number of bytes transferred on
 * success.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the guest's controllen: host cmsg structures can be larger
     * than the target's, so reserve extra room for the conversion. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* The fd's translator may rewrite the payload in place, so
             * work on a private copy of the first iovec's buffer. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 marks the deliberately-bad name set above;
                 * nothing valid to copy back in that case. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3326 
3327 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3328                                int flags, int send)
3329 {
3330     abi_long ret;
3331     struct target_msghdr *msgp;
3332 
3333     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3334                           msgp,
3335                           target_msg,
3336                           send ? 1 : 0)) {
3337         return -TARGET_EFAULT;
3338     }
3339     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3340     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3341     return ret;
3342 }
3343 
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked().
 * Mirrors the kernel's partial-success rule: if at least one datagram
 * was transferred, return that count and swallow the error; otherwise
 * return the error itself.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        /* Silently clamp, as the kernel does. */
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3390 
/* do_accept4() Must return target values and target errnos.
 *
 * Emulates accept4(2): accepts a connection on 'fd', converting the
 * peer address (if requested) from host to target layout and writing
 * the resulting length back to target_addrlen_addr.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* Translate target SOCK_CLOEXEC/SOCK_NONBLOCK to host values. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        /* Caller doesn't want the peer address. */
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* Probe writability up front so we fail with EFAULT before
     * accepting (and thereby consuming) the connection. */
    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
3430 
3431 /* do_getpeername() Must return target values and target errnos. */
3432 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3433                                abi_ulong target_addrlen_addr)
3434 {
3435     socklen_t addrlen, ret_addrlen;
3436     void *addr;
3437     abi_long ret;
3438 
3439     if (get_user_u32(addrlen, target_addrlen_addr))
3440         return -TARGET_EFAULT;
3441 
3442     if ((int)addrlen < 0) {
3443         return -TARGET_EINVAL;
3444     }
3445 
3446     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3447         return -TARGET_EFAULT;
3448     }
3449 
3450     addr = alloca(addrlen);
3451 
3452     ret_addrlen = addrlen;
3453     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3454     if (!is_error(ret)) {
3455         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3456         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3457             ret = -TARGET_EFAULT;
3458         }
3459     }
3460     return ret;
3461 }
3462 
3463 /* do_getsockname() Must return target values and target errnos. */
3464 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3465                                abi_ulong target_addrlen_addr)
3466 {
3467     socklen_t addrlen, ret_addrlen;
3468     void *addr;
3469     abi_long ret;
3470 
3471     if (get_user_u32(addrlen, target_addrlen_addr))
3472         return -TARGET_EFAULT;
3473 
3474     if ((int)addrlen < 0) {
3475         return -TARGET_EINVAL;
3476     }
3477 
3478     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3479         return -TARGET_EFAULT;
3480     }
3481 
3482     addr = alloca(addrlen);
3483 
3484     ret_addrlen = addrlen;
3485     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3486     if (!is_error(ret)) {
3487         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3488         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3489             ret = -TARGET_EFAULT;
3490         }
3491     }
3492     return ret;
3493 }
3494 
3495 /* do_socketpair() Must return target values and target errnos. */
3496 static abi_long do_socketpair(int domain, int type, int protocol,
3497                               abi_ulong target_tab_addr)
3498 {
3499     int tab[2];
3500     abi_long ret;
3501 
3502     target_to_host_sock_type(&type);
3503 
3504     ret = get_errno(socketpair(domain, type, protocol, tab));
3505     if (!is_error(ret)) {
3506         if (put_user_s32(tab[0], target_tab_addr)
3507             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3508             ret = -TARGET_EFAULT;
3509     }
3510     return ret;
3511 }
3512 
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL; /* non-NULL only when host_msg was re-allocated */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /*
         * This fd has a data translator; it may rewrite the payload in
         * place, so work on a private copy and keep the locked guest
         * buffer in copy_msg for the cleanup swap below.
         */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 leaves room for NUL-terminating a unix-domain sun_path. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Undo the translator copy so unlock_user releases the real guest buffer. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3556 
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        /* A NULL guest buffer is legal; recvfrom just discards the data. */
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Translate received payload in place for special fds. */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Truncate to the guest's buffer but report the full length. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: commit up to len bytes back to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error: release the buffer without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3617 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    /*
     * socketcall(2) multiplexes all socket syscalls through one entry
     * point: @num selects the operation and @vptr points at an array of
     * abi_long arguments in guest memory.  nargs[] gives the argument
     * count for each operation so we know how much to fetch.
     */
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3710 
#define N_SHM_REGIONS	32

/*
 * Fixed-size table tracking guest shared-memory attachments so they can
 * be found again later (e.g. to look up a mapping's size by its start
 * address).  Free slots have in_use == false.
 */
static struct shm_region {
    abi_ulong start;   /* guest virtual address of the attachment */
    abi_ulong size;    /* size of the mapped segment in bytes */
    bool in_use;
} shm_regions[N_SHM_REGIONS];
3718 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/*
 * Guest-ABI layout of semid64_ds for targets that do not provide their
 * own definition.  On 32-bit ABIs the time fields are padded to 64 bits
 * with the __unusedN words.  Field order and padding are ABI and must
 * not be changed.
 */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3737 
/*
 * Convert the guest struct ipc_perm embedded at the start of a guest
 * semid64_ds at @target_addr into host *host_ip.  Note that mode and
 * __seq are 32-bit on some targets and 16-bit on others, hence the
 * per-target swap widths below.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3765 
/*
 * Reverse of target_to_host_ipc_perm(): write host *host_ip into the
 * guest struct ipc_perm embedded in the guest semid64_ds at
 * @target_addr, using the same per-target field widths.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3793 
3794 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3795                                                abi_ulong target_addr)
3796 {
3797     struct target_semid64_ds *target_sd;
3798 
3799     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3800         return -TARGET_EFAULT;
3801     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3802         return -TARGET_EFAULT;
3803     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3804     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3805     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3806     unlock_user_struct(target_sd, target_addr, 0);
3807     return 0;
3808 }
3809 
3810 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3811                                                struct semid_ds *host_sd)
3812 {
3813     struct target_semid64_ds *target_sd;
3814 
3815     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3816         return -TARGET_EFAULT;
3817     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3818         return -TARGET_EFAULT;
3819     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3820     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3821     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3822     unlock_user_struct(target_sd, target_addr, 1);
3823     return 0;
3824 }
3825 
/*
 * Guest-ABI layout of struct seminfo as returned by semctl(IPC_INFO /
 * SEM_INFO).  Mirrors the host struct field-for-field; layout is ABI.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3838 
3839 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3840                                               struct seminfo *host_seminfo)
3841 {
3842     struct target_seminfo *target_seminfo;
3843     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3844         return -TARGET_EFAULT;
3845     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3846     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3847     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3848     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3849     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3850     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3851     __put_user(host_seminfo->semume, &target_seminfo->semume);
3852     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3853     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3854     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3855     unlock_user_struct(target_seminfo, target_addr, 1);
3856     return 0;
3857 }
3858 
/* Host-side semctl(2) argument union (glibc does not export one). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};
3865 
/*
 * Guest-side semctl argument union: the pointer members are guest
 * addresses (abi_ulong), not host pointers.
 */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3872 
3873 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3874                                                abi_ulong target_addr)
3875 {
3876     int nsems;
3877     unsigned short *array;
3878     union semun semun;
3879     struct semid_ds semid_ds;
3880     int i, ret;
3881 
3882     semun.buf = &semid_ds;
3883 
3884     ret = semctl(semid, 0, IPC_STAT, semun);
3885     if (ret == -1)
3886         return get_errno(ret);
3887 
3888     nsems = semid_ds.sem_nsems;
3889 
3890     *host_array = g_try_new(unsigned short, nsems);
3891     if (!*host_array) {
3892         return -TARGET_ENOMEM;
3893     }
3894     array = lock_user(VERIFY_READ, target_addr,
3895                       nsems*sizeof(unsigned short), 1);
3896     if (!array) {
3897         g_free(*host_array);
3898         return -TARGET_EFAULT;
3899     }
3900 
3901     for(i=0; i<nsems; i++) {
3902         __get_user((*host_array)[i], &array[i]);
3903     }
3904     unlock_user(array, target_addr, 0);
3905 
3906     return 0;
3907 }
3908 
3909 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3910                                                unsigned short **host_array)
3911 {
3912     int nsems;
3913     unsigned short *array;
3914     union semun semun;
3915     struct semid_ds semid_ds;
3916     int i, ret;
3917 
3918     semun.buf = &semid_ds;
3919 
3920     ret = semctl(semid, 0, IPC_STAT, semun);
3921     if (ret == -1)
3922         return get_errno(ret);
3923 
3924     nsems = semid_ds.sem_nsems;
3925 
3926     array = lock_user(VERIFY_WRITE, target_addr,
3927                       nsems*sizeof(unsigned short), 0);
3928     if (!array)
3929         return -TARGET_EFAULT;
3930 
3931     for(i=0; i<nsems; i++) {
3932         __put_user((*host_array)[i], &array[i]);
3933     }
3934     g_free(*host_array);
3935     unlock_user(array, target_addr, 1);
3936 
3937     return 0;
3938 }
3939 
/*
 * Emulate semctl(2).  @target_arg is the raw guest word holding the
 * union semun argument; how it is interpreted depends on @cmd.
 * Returns target values and target errnos.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and similar version flags from the command. */
    cmd &= 0xff;

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* host_to_target_semarray() also frees the array allocated here. */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* These commands ignore the semun argument entirely. */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }
    /* Unknown commands fall through with ret == -TARGET_EINVAL. */

    return ret;
}
4009 
/* Guest-ABI layout of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4015 
4016 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4017                                              abi_ulong target_addr,
4018                                              unsigned nsops)
4019 {
4020     struct target_sembuf *target_sembuf;
4021     int i;
4022 
4023     target_sembuf = lock_user(VERIFY_READ, target_addr,
4024                               nsops*sizeof(struct target_sembuf), 1);
4025     if (!target_sembuf)
4026         return -TARGET_EFAULT;
4027 
4028     for(i=0; i<nsops; i++) {
4029         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4030         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4031         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4032     }
4033 
4034     unlock_user(target_sembuf, target_addr, 0);
4035 
4036     return 0;
4037 }
4038 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
/* s390x sys_ipc: timeout travels in the "ptr" slot, before the sops. */
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
/* Generic sys_ipc: unused third argument, then sops, then timeout. */
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif
4053 
/*
 * Emulate semop(2)/semtimedop(2).  @timeout is a guest timespec address
 * (0 for plain semop); @time64 selects the 64-bit timespec layout.
 * Falls back to the multiplexed sys_ipc entry point on hosts that lack
 * a direct semtimedop syscall.  Returns target errnos.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Bound the allocation below; the kernel enforces the same limit. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    /* Fallback for hosts that only expose the multiplexed ipc syscall. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4101 
/*
 * Guest-ABI layout of msqid64_ds.  On 32-bit ABIs the time fields are
 * padded to 64 bits with the __unusedN words.  Layout is ABI; do not
 * reorder fields.
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4125 
4126 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4127                                                abi_ulong target_addr)
4128 {
4129     struct target_msqid_ds *target_md;
4130 
4131     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4132         return -TARGET_EFAULT;
4133     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4134         return -TARGET_EFAULT;
4135     host_md->msg_stime = tswapal(target_md->msg_stime);
4136     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4137     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4138     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4139     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4140     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4141     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4142     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4143     unlock_user_struct(target_md, target_addr, 0);
4144     return 0;
4145 }
4146 
4147 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4148                                                struct msqid_ds *host_md)
4149 {
4150     struct target_msqid_ds *target_md;
4151 
4152     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4153         return -TARGET_EFAULT;
4154     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4155         return -TARGET_EFAULT;
4156     target_md->msg_stime = tswapal(host_md->msg_stime);
4157     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4158     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4159     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4160     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4161     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4162     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4163     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4164     unlock_user_struct(target_md, target_addr, 1);
4165     return 0;
4166 }
4167 
/*
 * Guest-ABI layout of struct msginfo as returned by msgctl(IPC_INFO /
 * MSG_INFO).  Mirrors the host struct field-for-field; layout is ABI.
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4178 
4179 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4180                                               struct msginfo *host_msginfo)
4181 {
4182     struct target_msginfo *target_msginfo;
4183     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4184         return -TARGET_EFAULT;
4185     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4186     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4187     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4188     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4189     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4190     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4191     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4192     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4193     unlock_user_struct(target_msginfo, target_addr, 1);
4194     return 0;
4195 }
4196 
/*
 * Emulate msgctl(2).  @ptr is a guest address whose meaning depends on
 * @cmd (msqid_ds for STAT/SET, msginfo for INFO).  Returns target errnos.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and similar version flags from the command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        /*
         * NOTE(review): dsarg is copied back even when msgctl failed,
         * echoing stale data into the guest struct — confirm intended.
         */
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        /* No data argument for queue removal. */
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a struct msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }
    /* Unknown commands fall through with ret == -TARGET_EINVAL. */

    return ret;
}
4228 
/*
 * Guest-ABI layout of struct msgbuf.  mtext[1] is the traditional
 * variable-length trailer; the real message follows mtype in memory.
 */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
4233 
/*
 * Emulate msgsnd(2): copy the guest message at @msgp into a host buffer
 * and send it, falling back to the multiplexed sys_ipc entry point on
 * hosts without a direct msgsnd syscall.  Returns target errnos.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    /* Fallback for hosts that only expose the multiplexed ipc syscall. */
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* The s390x sys_ipc variant takes only five parameters. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4274 
#ifdef __NR_ipc
/*
 * How msgp/msgtyp are marshalled into the multiplexed sys_ipc call
 * differs per host architecture; the generic kernel expects them packed
 * into a two-element "kludge" array passed by pointer.
 */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4288 
/*
 * Emulate msgrcv(2): receive into a host buffer, then copy mtype and the
 * received text back into the guest's msgbuf at @msgp.  Falls back to
 * the multiplexed sys_ipc entry point where needed.  On success returns
 * the number of bytes received; otherwise a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* sizeof(long) covers the host mtype field preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* Fallback for hosts that only expose the multiplexed ipc syscall. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* Copy the 'ret' bytes of message text back to the guest. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here; kept as a defensive check. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4340 
4341 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4342                                                abi_ulong target_addr)
4343 {
4344     struct target_shmid_ds *target_sd;
4345 
4346     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4347         return -TARGET_EFAULT;
4348     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4349         return -TARGET_EFAULT;
4350     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4351     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4352     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4353     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4354     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4355     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4356     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4357     unlock_user_struct(target_sd, target_addr, 0);
4358     return 0;
4359 }
4360 
4361 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4362                                                struct shmid_ds *host_sd)
4363 {
4364     struct target_shmid_ds *target_sd;
4365 
4366     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4367         return -TARGET_EFAULT;
4368     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4369         return -TARGET_EFAULT;
4370     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4371     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4372     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4373     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4374     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4375     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4376     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4377     unlock_user_struct(target_sd, target_addr, 1);
4378     return 0;
4379 }
4380 
/* Guest-ABI layout of struct shminfo, filled in for shmctl(IPC_INFO). */
struct  target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
4388 
4389 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4390                                               struct shminfo *host_shminfo)
4391 {
4392     struct target_shminfo *target_shminfo;
4393     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4394         return -TARGET_EFAULT;
4395     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4396     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4397     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4398     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4399     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4400     unlock_user_struct(target_shminfo, target_addr, 1);
4401     return 0;
4402 }
4403 
/* Guest-ABI layout of struct shm_info, filled in for shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* number of existing segments */
    abi_ulong shm_tot;          /* total shared memory (pages) */
    abi_ulong shm_rss;          /* resident pages */
    abi_ulong shm_swp;          /* swapped pages */
    abi_ulong swap_attempts;    /* historical; unused by modern kernels? verify */
    abi_ulong swap_successes;   /* historical; unused by modern kernels? verify */
};
4412 
4413 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4414                                                struct shm_info *host_shm_info)
4415 {
4416     struct target_shm_info *target_shm_info;
4417     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4418         return -TARGET_EFAULT;
4419     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4420     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4421     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4422     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4423     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4424     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4425     unlock_user_struct(target_shm_info, target_addr, 1);
4426     return 0;
4427 }
4428 
4429 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4430 {
4431     struct shmid_ds dsarg;
4432     struct shminfo shminfo;
4433     struct shm_info shm_info;
4434     abi_long ret = -TARGET_EINVAL;
4435 
4436     cmd &= 0xff;
4437 
4438     switch(cmd) {
4439     case IPC_STAT:
4440     case IPC_SET:
4441     case SHM_STAT:
4442         if (target_to_host_shmid_ds(&dsarg, buf))
4443             return -TARGET_EFAULT;
4444         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4445         if (host_to_target_shmid_ds(buf, &dsarg))
4446             return -TARGET_EFAULT;
4447         break;
4448     case IPC_INFO:
4449         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4450         if (host_to_target_shminfo(buf, &shminfo))
4451             return -TARGET_EFAULT;
4452         break;
4453     case SHM_INFO:
4454         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4455         if (host_to_target_shm_info(buf, &shm_info))
4456             return -TARGET_EFAULT;
4457         break;
4458     case IPC_RMID:
4459     case SHM_LOCK:
4460     case SHM_UNLOCK:
4461         ret = get_errno(shmctl(shmid, cmd, NULL));
4462         break;
4463     }
4464 
4465     return ret;
4466 }
4467 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Returns the attach-address alignment shmat() must enforce for the
 * target; the generic case is simply the target page size. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4487 
/*
 * Emulate shmat(2): attach SysV shared-memory segment @shmid at guest
 * address @shmaddr, or at an address of our choosing when @shmaddr is 0.
 * Returns the guest attach address, or a target errno on failure.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce target SHMLBA alignment; SHM_RND asks us to round down. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace the placeholder reservation we found. */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the attached range valid (read-only when SHM_RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Remember the mapping so do_shmdt() can later drop the page flags.
     * NOTE(review): if all N_SHM_REGIONS slots are in use the attach
     * still succeeds but is not recorded -- later shmdt won't clear
     * its page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4572 
4573 static inline abi_long do_shmdt(abi_ulong shmaddr)
4574 {
4575     int i;
4576     abi_long rv;
4577 
4578     /* shmdt pointers are always untagged */
4579 
4580     mmap_lock();
4581 
4582     for (i = 0; i < N_SHM_REGIONS; ++i) {
4583         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4584             shm_regions[i].in_use = false;
4585             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4586             break;
4587         }
4588     }
4589     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4590 
4591     mmap_unlock();
4592 
4593     return rv;
4594 }
4595 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of @call
 * select the operation, the high 16 bits carry an ABI "version" that
 * changes argument marshalling for some operations (msgrcv, shmat).
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: ptr points at a kludge struct bundling the
                 * message buffer pointer and the message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through the 'third' pointer. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* version 1 of shmat is not supported */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4716 
/* kernel structure types definitions */

/*
 * X-macro expansion of syscall_types.h.  The first include pass builds an
 * enum of STRUCT_<name> tags; the second pass emits, for every non-special
 * entry, a thunk type-description array struct_<name>_def[] terminated by
 * TYPE_NULL.  STRUCT_SPECIAL entries get a tag but no generated array.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed scratch buffer used when converting ioctl arguments;
 * payloads larger than this are heap-allocated by the ioctl helpers. */
#define MAX_STRUCT_SIZE 4096
4735 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Handler for FS_IOC_FIEMAP: converts the guest's variable-length
 * fiemap request, issues the host ioctl, and converts the header plus
 * each returned fiemap_extent back to guest layout.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the guest-controlled extent count before sizing the buffer,
     * so the outbufsz computation below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4824 
4825 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4826                                 int fd, int cmd, abi_long arg)
4827 {
4828     const argtype *arg_type = ie->arg_type;
4829     int target_size;
4830     void *argptr;
4831     int ret;
4832     struct ifconf *host_ifconf;
4833     uint32_t outbufsz;
4834     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4835     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4836     int target_ifreq_size;
4837     int nb_ifreq;
4838     int free_buf = 0;
4839     int i;
4840     int target_ifc_len;
4841     abi_long target_ifc_buf;
4842     int host_ifc_len;
4843     char *host_ifc_buf;
4844 
4845     assert(arg_type[0] == TYPE_PTR);
4846     assert(ie->access == IOC_RW);
4847 
4848     arg_type++;
4849     target_size = thunk_type_size(arg_type, 0);
4850 
4851     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4852     if (!argptr)
4853         return -TARGET_EFAULT;
4854     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4855     unlock_user(argptr, arg, 0);
4856 
4857     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4858     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4859     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4860 
4861     if (target_ifc_buf != 0) {
4862         target_ifc_len = host_ifconf->ifc_len;
4863         nb_ifreq = target_ifc_len / target_ifreq_size;
4864         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4865 
4866         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4867         if (outbufsz > MAX_STRUCT_SIZE) {
4868             /*
4869              * We can't fit all the extents into the fixed size buffer.
4870              * Allocate one that is large enough and use it instead.
4871              */
4872             host_ifconf = g_try_malloc(outbufsz);
4873             if (!host_ifconf) {
4874                 return -TARGET_ENOMEM;
4875             }
4876             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4877             free_buf = 1;
4878         }
4879         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4880 
4881         host_ifconf->ifc_len = host_ifc_len;
4882     } else {
4883       host_ifc_buf = NULL;
4884     }
4885     host_ifconf->ifc_buf = host_ifc_buf;
4886 
4887     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4888     if (!is_error(ret)) {
4889 	/* convert host ifc_len to target ifc_len */
4890 
4891         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4892         target_ifc_len = nb_ifreq * target_ifreq_size;
4893         host_ifconf->ifc_len = target_ifc_len;
4894 
4895 	/* restore target ifc_buf */
4896 
4897         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4898 
4899 	/* copy struct ifconf to target user */
4900 
4901         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4902         if (!argptr)
4903             return -TARGET_EFAULT;
4904         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4905         unlock_user(argptr, arg, target_size);
4906 
4907         if (target_ifc_buf != 0) {
4908             /* copy ifreq[] to target user */
4909             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4910             for (i = 0; i < nb_ifreq ; i++) {
4911                 thunk_convert(argptr + i * target_ifreq_size,
4912                               host_ifc_buf + i * sizeof(struct ifreq),
4913                               ifreq_arg_type, THUNK_TARGET);
4914             }
4915             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4916         }
4917     }
4918 
4919     if (free_buf) {
4920         g_free(host_ifconf);
4921     }
4922 
4923     return ret;
4924 }
4925 
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/*
 * Bookkeeping for an in-flight usbdevfs URB: pairs the host-side
 * usbdevfs_urb submitted to the kernel with the guest addresses of the
 * URB struct and its data buffer, so results can be written back when
 * the URB is reaped.  Doubles as the key/value in the URB hash table.
 */
struct live_urb {
    uint64_t target_urb_adr;     /* guest address of the guest URB struct */
    uint64_t target_buf_adr;     /* guest address of the data buffer */
    char *target_buf_ptr;        /* host mapping of the locked guest buffer */
    struct usbdevfs_urb host_urb;
};
4936 
4937 static GHashTable *usbdevfs_urb_hashtable(void)
4938 {
4939     static GHashTable *urb_hashtable;
4940 
4941     if (!urb_hashtable) {
4942         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4943     }
4944     return urb_hashtable;
4945 }
4946 
/* Track a newly submitted URB; the live_urb is both key and value. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4952 
/*
 * Look up an in-flight URB by its guest address.  The parameter itself
 * provides the addressable 64-bit key required by g_int64_hash.
 * Returns NULL when no such URB was submitted.
 */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    return g_hash_table_lookup(usbdevfs_urb_hashtable(), &target_urb_adr);
}
4958 
/* Stop tracking a reaped/cancelled URB (does not free it). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4964 
/*
 * Handler for USBDEVFS_REAPURB{,NDELAY}: fetch a completed URB from the
 * kernel, copy its results back into the guest's URB struct and data
 * buffer, and return the guest URB address through *arg.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel writes a host urb pointer into buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* Recover the wrapping live_urb from the embedded host_urb pointer
     * (container_of-style arithmetic via offsetof). */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the guest data buffer locked at submit time. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5024 
5025 static abi_long
5026 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5027                              uint8_t *buf_temp __attribute__((unused)),
5028                              int fd, int cmd, abi_long arg)
5029 {
5030     struct live_urb *lurb;
5031 
5032     /* map target address back to host URB with metadata. */
5033     lurb = urb_hashtable_lookup(arg);
5034     if (!lurb) {
5035         return -TARGET_EFAULT;
5036     }
5037     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5038 }
5039 
/*
 * Handler for USBDEVFS_SUBMITURB: build a host copy of the guest URB,
 * lock the guest data buffer for the duration of the transfer, and hand
 * the host URB to the kernel.  On success the live_urb is tracked until
 * the URB is reaped or discarded.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember guest addresses so the reap path can write results back. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: drop the buffer lock and the metadata. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Ownership passes to the hash table until reap/discard. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5100 #endif /* CONFIG_USBFS */
5101 
5102 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5103                             int cmd, abi_long arg)
5104 {
5105     void *argptr;
5106     struct dm_ioctl *host_dm;
5107     abi_long guest_data;
5108     uint32_t guest_data_size;
5109     int target_size;
5110     const argtype *arg_type = ie->arg_type;
5111     abi_long ret;
5112     void *big_buf = NULL;
5113     char *host_data;
5114 
5115     arg_type++;
5116     target_size = thunk_type_size(arg_type, 0);
5117     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5118     if (!argptr) {
5119         ret = -TARGET_EFAULT;
5120         goto out;
5121     }
5122     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5123     unlock_user(argptr, arg, 0);
5124 
5125     /* buf_temp is too small, so fetch things into a bigger buffer */
5126     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5127     memcpy(big_buf, buf_temp, target_size);
5128     buf_temp = big_buf;
5129     host_dm = big_buf;
5130 
5131     guest_data = arg + host_dm->data_start;
5132     if ((guest_data - arg) < 0) {
5133         ret = -TARGET_EINVAL;
5134         goto out;
5135     }
5136     guest_data_size = host_dm->data_size - host_dm->data_start;
5137     host_data = (char*)host_dm + host_dm->data_start;
5138 
5139     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5140     if (!argptr) {
5141         ret = -TARGET_EFAULT;
5142         goto out;
5143     }
5144 
5145     switch (ie->host_cmd) {
5146     case DM_REMOVE_ALL:
5147     case DM_LIST_DEVICES:
5148     case DM_DEV_CREATE:
5149     case DM_DEV_REMOVE:
5150     case DM_DEV_SUSPEND:
5151     case DM_DEV_STATUS:
5152     case DM_DEV_WAIT:
5153     case DM_TABLE_STATUS:
5154     case DM_TABLE_CLEAR:
5155     case DM_TABLE_DEPS:
5156     case DM_LIST_VERSIONS:
5157         /* no input data */
5158         break;
5159     case DM_DEV_RENAME:
5160     case DM_DEV_SET_GEOMETRY:
5161         /* data contains only strings */
5162         memcpy(host_data, argptr, guest_data_size);
5163         break;
5164     case DM_TARGET_MSG:
5165         memcpy(host_data, argptr, guest_data_size);
5166         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5167         break;
5168     case DM_TABLE_LOAD:
5169     {
5170         void *gspec = argptr;
5171         void *cur_data = host_data;
5172         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5173         int spec_size = thunk_type_size(arg_type, 0);
5174         int i;
5175 
5176         for (i = 0; i < host_dm->target_count; i++) {
5177             struct dm_target_spec *spec = cur_data;
5178             uint32_t next;
5179             int slen;
5180 
5181             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5182             slen = strlen((char*)gspec + spec_size) + 1;
5183             next = spec->next;
5184             spec->next = sizeof(*spec) + slen;
5185             strcpy((char*)&spec[1], gspec + spec_size);
5186             gspec += next;
5187             cur_data += spec->next;
5188         }
5189         break;
5190     }
5191     default:
5192         ret = -TARGET_EINVAL;
5193         unlock_user(argptr, guest_data, 0);
5194         goto out;
5195     }
5196     unlock_user(argptr, guest_data, 0);
5197 
5198     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5199     if (!is_error(ret)) {
5200         guest_data = arg + host_dm->data_start;
5201         guest_data_size = host_dm->data_size - host_dm->data_start;
5202         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5203         switch (ie->host_cmd) {
5204         case DM_REMOVE_ALL:
5205         case DM_DEV_CREATE:
5206         case DM_DEV_REMOVE:
5207         case DM_DEV_RENAME:
5208         case DM_DEV_SUSPEND:
5209         case DM_DEV_STATUS:
5210         case DM_TABLE_LOAD:
5211         case DM_TABLE_CLEAR:
5212         case DM_TARGET_MSG:
5213         case DM_DEV_SET_GEOMETRY:
5214             /* no return data */
5215             break;
5216         case DM_LIST_DEVICES:
5217         {
5218             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5219             uint32_t remaining_data = guest_data_size;
5220             void *cur_data = argptr;
5221             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5222             int nl_size = 12; /* can't use thunk_size due to alignment */
5223 
5224             while (1) {
5225                 uint32_t next = nl->next;
5226                 if (next) {
5227                     nl->next = nl_size + (strlen(nl->name) + 1);
5228                 }
5229                 if (remaining_data < nl->next) {
5230                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5231                     break;
5232                 }
5233                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5234                 strcpy(cur_data + nl_size, nl->name);
5235                 cur_data += nl->next;
5236                 remaining_data -= nl->next;
5237                 if (!next) {
5238                     break;
5239                 }
5240                 nl = (void*)nl + next;
5241             }
5242             break;
5243         }
5244         case DM_DEV_WAIT:
5245         case DM_TABLE_STATUS:
5246         {
5247             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5248             void *cur_data = argptr;
5249             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5250             int spec_size = thunk_type_size(arg_type, 0);
5251             int i;
5252 
5253             for (i = 0; i < host_dm->target_count; i++) {
5254                 uint32_t next = spec->next;
5255                 int slen = strlen((char*)&spec[1]) + 1;
5256                 spec->next = (cur_data - argptr) + spec_size + slen;
5257                 if (guest_data_size < spec->next) {
5258                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5259                     break;
5260                 }
5261                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5262                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5263                 cur_data = argptr + spec->next;
5264                 spec = (void*)host_dm + host_dm->data_start + next;
5265             }
5266             break;
5267         }
5268         case DM_TABLE_DEPS:
5269         {
5270             void *hdata = (void*)host_dm + host_dm->data_start;
5271             int count = *(uint32_t*)hdata;
5272             uint64_t *hdev = hdata + 8;
5273             uint64_t *gdev = argptr + 8;
5274             int i;
5275 
5276             *(uint32_t*)argptr = tswap32(count);
5277             for (i = 0; i < count; i++) {
5278                 *gdev = tswap64(*hdev);
5279                 gdev++;
5280                 hdev++;
5281             }
5282             break;
5283         }
5284         case DM_LIST_VERSIONS:
5285         {
5286             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5287             uint32_t remaining_data = guest_data_size;
5288             void *cur_data = argptr;
5289             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5290             int vers_size = thunk_type_size(arg_type, 0);
5291 
5292             while (1) {
5293                 uint32_t next = vers->next;
5294                 if (next) {
5295                     vers->next = vers_size + (strlen(vers->name) + 1);
5296                 }
5297                 if (remaining_data < vers->next) {
5298                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5299                     break;
5300                 }
5301                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5302                 strcpy(cur_data + vers_size, vers->name);
5303                 cur_data += vers->next;
5304                 remaining_data -= vers->next;
5305                 if (!next) {
5306                     break;
5307                 }
5308                 vers = (void*)vers + next;
5309             }
5310             break;
5311         }
5312         default:
5313             unlock_user(argptr, guest_data, 0);
5314             ret = -TARGET_EINVAL;
5315             goto out;
5316         }
5317         unlock_user(argptr, guest_data, guest_data_size);
5318 
5319         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5320         if (!argptr) {
5321             ret = -TARGET_EFAULT;
5322             goto out;
5323         }
5324         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5325         unlock_user(argptr, arg, target_size);
5326     }
5327 out:
5328     g_free(big_buf);
5329     return ret;
5330 }
5331 
/*
 * BLKPG ioctl: the argument is a struct blkpg_ioctl_arg whose 'data'
 * member points at a struct blkpg_partition.  Both levels must be
 * fetched from guest memory and thunk-converted, and the embedded
 * pointer re-targeted at a host-side copy before the host ioctl runs.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    /* buf_temp holds the host-format struct blkpg_ioctl_arg */
    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;  /* skip the TYPE_PTR marker, leaving the struct type */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (already host byte order after the
     * conversion above; its value is the guest address of the payload) */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5384 
/*
 * SIOCADDRT/SIOCDELRT: struct rtentry contains an embedded rt_dev
 * string pointer that the generic thunk machinery cannot translate.
 * Convert the structure field by field here so the guest rt_dev
 * pointer can be replaced with a locked host string for the ioctl.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* These ioctls are write-only pointers to a STRUCT_rtentry. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* lock the guest device-name string into host memory;
                 * the pointer value is target-endian, hence tswapal() */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        /* all other fields go through the normal thunk conversion */
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* the loop above must always have found the rt_dev field */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        /* release the locked rt_dev string (no write-back needed) */
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5450 
5451 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5452                                      int fd, int cmd, abi_long arg)
5453 {
5454     int sig = target_to_host_signal(arg);
5455     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5456 }
5457 
5458 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5459                                     int fd, int cmd, abi_long arg)
5460 {
5461     struct timeval tv;
5462     abi_long ret;
5463 
5464     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5465     if (is_error(ret)) {
5466         return ret;
5467     }
5468 
5469     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5470         if (copy_to_user_timeval(arg, &tv)) {
5471             return -TARGET_EFAULT;
5472         }
5473     } else {
5474         if (copy_to_user_timeval64(arg, &tv)) {
5475             return -TARGET_EFAULT;
5476         }
5477     }
5478 
5479     return ret;
5480 }
5481 
5482 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5483                                       int fd, int cmd, abi_long arg)
5484 {
5485     struct timespec ts;
5486     abi_long ret;
5487 
5488     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5489     if (is_error(ret)) {
5490         return ret;
5491     }
5492 
5493     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5494         if (host_to_target_timespec(arg, &ts)) {
5495             return -TARGET_EFAULT;
5496         }
5497     } else{
5498         if (host_to_target_timespec64(arg, &ts)) {
5499             return -TARGET_EFAULT;
5500         }
5501     }
5502 
5503     return ret;
5504 }
5505 
#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    /* The argument is a set of open(2)-style flags: translate the bits. */
    return get_errno(safe_ioctl(fd, ie->host_cmd,
                                target_to_host_bitmask(arg, fcntl_flags_tbl)));
}
#endif
5514 
5515 #ifdef HAVE_DRM_H
5516 
5517 static void unlock_drm_version(struct drm_version *host_ver,
5518                                struct target_drm_version *target_ver,
5519                                bool copy)
5520 {
5521     unlock_user(host_ver->name, target_ver->name,
5522                                 copy ? host_ver->name_len : 0);
5523     unlock_user(host_ver->date, target_ver->date,
5524                                 copy ? host_ver->date_len : 0);
5525     unlock_user(host_ver->desc, target_ver->desc,
5526                                 copy ? host_ver->desc_len : 0);
5527 }
5528 
5529 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5530                                           struct target_drm_version *target_ver)
5531 {
5532     memset(host_ver, 0, sizeof(*host_ver));
5533 
5534     __get_user(host_ver->name_len, &target_ver->name_len);
5535     if (host_ver->name_len) {
5536         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5537                                    target_ver->name_len, 0);
5538         if (!host_ver->name) {
5539             return -EFAULT;
5540         }
5541     }
5542 
5543     __get_user(host_ver->date_len, &target_ver->date_len);
5544     if (host_ver->date_len) {
5545         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5546                                    target_ver->date_len, 0);
5547         if (!host_ver->date) {
5548             goto err;
5549         }
5550     }
5551 
5552     __get_user(host_ver->desc_len, &target_ver->desc_len);
5553     if (host_ver->desc_len) {
5554         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5555                                    target_ver->desc_len, 0);
5556         if (!host_ver->desc) {
5557             goto err;
5558         }
5559     }
5560 
5561     return 0;
5562 err:
5563     unlock_drm_version(host_ver, target_ver, false);
5564     return -EFAULT;
5565 }
5566 
/*
 * Copy the version numbers and the (possibly updated) string lengths
 * back into the guest's struct drm_version, then write out and release
 * the three string buffers locked by target_to_host_drmversion().
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    /* copy=true: string contents are written back to the guest */
    unlock_drm_version(host_ver, target_ver, true);
}
5579 
5580 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5581                              int fd, int cmd, abi_long arg)
5582 {
5583     struct drm_version *ver;
5584     struct target_drm_version *target_ver;
5585     abi_long ret;
5586 
5587     switch (ie->host_cmd) {
5588     case DRM_IOCTL_VERSION:
5589         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5590             return -TARGET_EFAULT;
5591         }
5592         ver = (struct drm_version *)buf_temp;
5593         ret = target_to_host_drmversion(ver, target_ver);
5594         if (!is_error(ret)) {
5595             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5596             if (is_error(ret)) {
5597                 unlock_drm_version(ver, target_ver, false);
5598             } else {
5599                 host_to_target_drmversion(target_ver, ver);
5600             }
5601         }
5602         unlock_user_struct(target_ver, arg, 0);
5603         return ret;
5604     }
5605     return -TARGET_ENOSYS;
5606 }
5607 
5608 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5609                                            struct drm_i915_getparam *gparam,
5610                                            int fd, abi_long arg)
5611 {
5612     abi_long ret;
5613     int value;
5614     struct target_drm_i915_getparam *target_gparam;
5615 
5616     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5617         return -TARGET_EFAULT;
5618     }
5619 
5620     __get_user(gparam->param, &target_gparam->param);
5621     gparam->value = &value;
5622     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5623     put_user_s32(value, target_gparam->value);
5624 
5625     unlock_user_struct(target_gparam, arg, 0);
5626     return ret;
5627 }
5628 
5629 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5630                                   int fd, int cmd, abi_long arg)
5631 {
5632     switch (ie->host_cmd) {
5633     case DRM_IOCTL_I915_GETPARAM:
5634         return do_ioctl_drm_i915_getparam(ie,
5635                                           (struct drm_i915_getparam *)buf_temp,
5636                                           fd, arg);
5637     default:
5638         return -TARGET_ENOSYS;
5639     }
5640 }
5641 
5642 #endif
5643 
/*
 * TUNSETTXFILTER: struct tun_filter has a variable-length array of
 * ETH_ALEN-byte MAC addresses after the fixed header, so it cannot be
 * described by the generic thunk tables.  Copy the header and the
 * address list from the guest by hand.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    /* fetch the fixed-size header (flags + address count) */
    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* reject address lists that would overflow the conversion buffer */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        /* MAC addresses are raw bytes: no byte-swapping needed */
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5679 
/*
 * Table of all translated ioctls, generated from ioctls.h.
 * IOCTL() entries use the generic argument thunking in do_ioctl();
 * IOCTL_SPECIAL() entries name a do_ioctl_*() helper; IOCTL_IGNORE()
 * entries are recognized but have host_cmd == 0 and so fail with
 * -TARGET_ENOSYS.  The table is terminated by a zero target_cmd.
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5690 
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* linear search of the translation table (terminated by target_cmd==0) */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* command has a dedicated translation helper */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* scalar argument: passed through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* pointer to a struct: thunk-convert in the direction(s)
           dictated by the entry's access mode */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* result-only: convert the host buffer back to the guest */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* input-only: convert the guest buffer for the host */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, run the ioctl, convert the result back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
5782 
/* Guest<->host translation of termios input-mode (c_iflag) bits. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};
5801 
/* Guest<->host translation of termios output-mode (c_oflag) bits;
 * the multi-bit delay fields (NLDLY, CRDLY, ...) get one row per value. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
5829 
/* Guest<->host translation of termios control-mode (c_cflag) bits;
 * one row per value for the multi-bit CBAUD and CSIZE fields. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
5864 
/* Guest<->host translation of termios local-mode (c_lflag) bits. */
static const bitmask_transtbl lflag_tbl[] = {
  { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
  { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
  { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
  { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
  { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
  { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
  { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
  { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
  { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
  { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
  { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
  { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
  { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
  { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
  { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
  { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
  { 0, 0, 0, 0 }
};
5884 
/*
 * Convert a guest struct target_termios (byte-swapped flags, guest
 * c_cc index layout) to the host's struct host_termios.  Used as the
 * THUNK_HOST converter in struct_termios_def below.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* c_cc index values differ between guest and host: clear the array
     * and copy each control character to its host slot explicitly */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
5919 
/*
 * Convert a host struct host_termios back to the guest layout; the
 * inverse of target_to_host_termios() and the THUNK_TARGET converter
 * in struct_termios_def below.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* remap each control character into the guest's c_cc index layout */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
5954 
/* Thunk descriptor for termios structures, wiring the two custom
 * converters above into the generic struct-conversion machinery. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5961 
/* Guest<->host translation of mmap(2) flag bits. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
5984 
5985 /*
5986  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5987  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5988  */
5989 #if defined(TARGET_I386)
5990 
/* NOTE: there is really one LDT for all the threads */
/* Host view of the guest LDT; set from env->ldt.base by write_ldt(). */
static uint8_t *ldt_table;
5993 
5994 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5995 {
5996     int size;
5997     void *p;
5998 
5999     if (!ldt_table)
6000         return 0;
6001     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6002     if (size > bytecount)
6003         size = bytecount;
6004     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6005     if (!p)
6006         return -TARGET_EFAULT;
6007     /* ??? Should this by byteswapped?  */
6008     memcpy(p, ldt_table, size);
6009     unlock_user(p, ptr, size);
6010     return size;
6011 }
6012 
/* XXX: add locking support */
/*
 * Emulate modify_ldt(2) writes: validate the guest-supplied descriptor,
 * lazily allocate the guest-visible LDT on first use, and install the
 * encoded 8-byte entry.  'oldmode' selects the legacy (func == 1)
 * semantics, which additionally reject contents == 3 and ignore the
 * 'useable' bit.  Returns 0 on success or a -TARGET_* errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the user_desc-style flags word (same layout as the kernel). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode the two 32-bit halves of the x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6104 
6105 /* specific and weird i386 syscalls */
6106 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6107                               unsigned long bytecount)
6108 {
6109     abi_long ret;
6110 
6111     switch (func) {
6112     case 0:
6113         ret = read_ldt(ptr, bytecount);
6114         break;
6115     case 1:
6116         ret = write_ldt(env, ptr, bytecount, 1);
6117         break;
6118     case 0x11:
6119         ret = write_ldt(env, ptr, bytecount, 0);
6120         break;
6121     default:
6122         ret = -TARGET_ENOSYS;
6123         break;
6124     }
6125     return ret;
6126 }
6127 
#if defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2) for 32-bit guests: choose (or validate) a
 * TLS slot in the GDT, encode the guest-supplied descriptor, and install
 * it.  If the guest passes entry_number == -1, the first free TLS slot
 * is selected and written back into the guest struct.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Guest asked us to pick a slot: take the first empty TLS entry. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the user_desc-style flags word (same layout as the kernel). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode the two 32-bit halves of the x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6213 
/*
 * Emulate get_thread_area(2) for 32-bit guests: decode the GDT TLS slot
 * named by the guest's entry_number back into user_desc-style fields
 * (base, limit, packed flags) and write them to the guest struct.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Inverse of the bit packing performed by do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6260 
/* arch_prctl(2) is a 64-bit-only syscall; reject it for ABI32 guests. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6265 #else
6266 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6267 {
6268     abi_long ret = 0;
6269     abi_ulong val;
6270     int idx;
6271 
6272     switch(code) {
6273     case TARGET_ARCH_SET_GS:
6274     case TARGET_ARCH_SET_FS:
6275         if (code == TARGET_ARCH_SET_GS)
6276             idx = R_GS;
6277         else
6278             idx = R_FS;
6279         cpu_x86_load_seg(env, idx, 0);
6280         env->segs[idx].base = addr;
6281         break;
6282     case TARGET_ARCH_GET_GS:
6283     case TARGET_ARCH_GET_FS:
6284         if (code == TARGET_ARCH_GET_GS)
6285             idx = R_GS;
6286         else
6287             idx = R_FS;
6288         val = env->segs[idx].base;
6289         if (put_user(val, addr, abi_ulong))
6290             ret = -TARGET_EFAULT;
6291         break;
6292     default:
6293         ret = -TARGET_EINVAL;
6294         break;
6295     }
6296     return ret;
6297 }
#endif /* defined(TARGET_ABI32) */
6299 #endif /* defined(TARGET_I386) */
6300 
6301 /*
6302  * These constants are generic.  Supply any that are missing from the host.
6303  */
6304 #ifndef PR_SET_NAME
6305 # define PR_SET_NAME    15
6306 # define PR_GET_NAME    16
6307 #endif
6308 #ifndef PR_SET_FP_MODE
6309 # define PR_SET_FP_MODE 45
6310 # define PR_GET_FP_MODE 46
6311 # define PR_FP_MODE_FR   (1 << 0)
6312 # define PR_FP_MODE_FRE  (1 << 1)
6313 #endif
6314 #ifndef PR_SVE_SET_VL
6315 # define PR_SVE_SET_VL  50
6316 # define PR_SVE_GET_VL  51
6317 # define PR_SVE_VL_LEN_MASK  0xffff
6318 # define PR_SVE_VL_INHERIT   (1 << 17)
6319 #endif
6320 #ifndef PR_PAC_RESET_KEYS
6321 # define PR_PAC_RESET_KEYS  54
6322 # define PR_PAC_APIAKEY   (1 << 0)
6323 # define PR_PAC_APIBKEY   (1 << 1)
6324 # define PR_PAC_APDAKEY   (1 << 2)
6325 # define PR_PAC_APDBKEY   (1 << 3)
6326 # define PR_PAC_APGAKEY   (1 << 4)
6327 #endif
6328 #ifndef PR_SET_TAGGED_ADDR_CTRL
6329 # define PR_SET_TAGGED_ADDR_CTRL 55
6330 # define PR_GET_TAGGED_ADDR_CTRL 56
6331 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6332 #endif
6333 #ifndef PR_MTE_TCF_SHIFT
6334 # define PR_MTE_TCF_SHIFT       1
6335 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6336 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6337 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6338 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6339 # define PR_MTE_TAG_SHIFT       3
6340 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6341 #endif
6342 #ifndef PR_SET_IO_FLUSHER
6343 # define PR_SET_IO_FLUSHER 57
6344 # define PR_GET_IO_FLUSHER 58
6345 #endif
6346 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6347 # define PR_SET_SYSCALL_USER_DISPATCH 59
6348 #endif
6349 
6350 #include "target_prctl.h"
6351 
/* Fallback for zero-argument PR_* options the target does not implement. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6356 
/* Fallback for one-argument PR_* options the target does not implement. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6361 
6362 #ifndef do_prctl_get_fp_mode
6363 #define do_prctl_get_fp_mode do_prctl_inval0
6364 #endif
6365 #ifndef do_prctl_set_fp_mode
6366 #define do_prctl_set_fp_mode do_prctl_inval1
6367 #endif
6368 #ifndef do_prctl_get_vl
6369 #define do_prctl_get_vl do_prctl_inval0
6370 #endif
6371 #ifndef do_prctl_set_vl
6372 #define do_prctl_set_vl do_prctl_inval1
6373 #endif
6374 #ifndef do_prctl_reset_keys
6375 #define do_prctl_reset_keys do_prctl_inval1
6376 #endif
6377 #ifndef do_prctl_set_tagged_addr_ctrl
6378 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6379 #endif
6380 #ifndef do_prctl_get_tagged_addr_ctrl
6381 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6382 #endif
6383 #ifndef do_prctl_get_unalign
6384 #define do_prctl_get_unalign do_prctl_inval1
6385 #endif
6386 #ifndef do_prctl_set_unalign
6387 #define do_prctl_set_unalign do_prctl_inval1
6388 #endif
6389 
/*
 * Implement prctl(2) for the guest.  Options with pointer or signal
 * arguments are translated before forwarding; per-target options are
 * routed through the do_prctl_* hooks from target_prctl.h; options that
 * are safe to forward verbatim are passed straight to the host; the rest
 * are rejected with EINVAL so the guest cannot disturb process state
 * that QEMU itself depends on.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            /* Translate the host signal number before exposing it. */
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* Task names are at most 16 bytes including the NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6517 
6518 #define NEW_STACK_SIZE 0x40000
6519 
6520 
/* Held across thread creation so parent-side TLS setup appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the child's clone_func(). */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;      /* guards the startup handshake */
    pthread_cond_t cond;        /* signalled once the child has its tid */
    pthread_t thread;
    uint32_t tid;               /* child's tid, filled in by clone_func() */
    abi_ulong child_tidptr;     /* guest addrs to store the tid into, */
    abi_ulong parent_tidptr;    /* or 0 if not requested */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
6532 
/*
 * Entry point of a new guest thread created by do_fork(CLONE_VM).
 * Registers the thread with RCU/TCG, publishes its tid through the
 * requested tid pointers, signals the waiting parent, then waits for
 * the parent to finish TLS setup (by acquiring clone_lock) before
 * entering the CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6566 
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a new
 * host pthread sharing this CPU's address space; anything else becomes
 * a host fork().  vfork() is emulated with fork() since we cannot share
 * the parent's stack.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore our signal mask; the child keeps all signals blocked
           until clone_func() installs info.sigmask itself.  */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6711 
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a guest fcntl(2) command constant to its host equivalent.
 * Record-lock commands are mapped to the 64-bit F_*LK64 variants so the
 * same struct flock64 path handles both 32- and 64-bit guests.  Returns
 * -TARGET_EINVAL for commands we do not support.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        /* These values match between guest and host. */
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6818 
/*
 * X-macro shared by target_to_host_flock() and host_to_target_flock():
 * expands to a switch over 'type' using whichever TRANSTBL_CONVERT
 * definition is in effect at the expansion site.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6825 
/*
 * Map a guest flock l_type (F_RDLCK/F_WRLCK/F_UNLCK) to the host value;
 * returns -TARGET_EINVAL for anything unrecognized.
 */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6833 
/* Map a host flock l_type back to the guest value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6844 
6845 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6846                                             abi_ulong target_flock_addr)
6847 {
6848     struct target_flock *target_fl;
6849     int l_type;
6850 
6851     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6852         return -TARGET_EFAULT;
6853     }
6854 
6855     __get_user(l_type, &target_fl->l_type);
6856     l_type = target_to_host_flock(l_type);
6857     if (l_type < 0) {
6858         return l_type;
6859     }
6860     fl->l_type = l_type;
6861     __get_user(fl->l_whence, &target_fl->l_whence);
6862     __get_user(fl->l_start, &target_fl->l_start);
6863     __get_user(fl->l_len, &target_fl->l_len);
6864     __get_user(fl->l_pid, &target_fl->l_pid);
6865     unlock_user_struct(target_fl, target_flock_addr, 0);
6866     return 0;
6867 }
6868 
/*
 * Convert the host 'struct flock64' at 'fl' into a guest 'struct flock'
 * (native-ABI layout) at 'target_flock_addr'.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct is unwritable.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6888 
/* Function-pointer types so do_fcntl() can pick the right (o)abi
 * marshalling helpers at runtime. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6891 
6892 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* Old-ABI (OABI) arm layout of struct flock64: packed, so l_start is
 * not 8-byte aligned as it is in the EABI layout. */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;
6900 
6901 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6902                                                    abi_ulong target_flock_addr)
6903 {
6904     struct target_oabi_flock64 *target_fl;
6905     int l_type;
6906 
6907     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6908         return -TARGET_EFAULT;
6909     }
6910 
6911     __get_user(l_type, &target_fl->l_type);
6912     l_type = target_to_host_flock(l_type);
6913     if (l_type < 0) {
6914         return l_type;
6915     }
6916     fl->l_type = l_type;
6917     __get_user(fl->l_whence, &target_fl->l_whence);
6918     __get_user(fl->l_start, &target_fl->l_start);
6919     __get_user(fl->l_len, &target_fl->l_len);
6920     __get_user(fl->l_pid, &target_fl->l_pid);
6921     unlock_user_struct(target_fl, target_flock_addr, 0);
6922     return 0;
6923 }
6924 
/*
 * Convert the host 'struct flock64' at 'fl' into a guest OABI
 * 'struct flock64' at 'target_flock_addr'.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct is unwritable.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6944 #endif
6945 
6946 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6947                                               abi_ulong target_flock_addr)
6948 {
6949     struct target_flock64 *target_fl;
6950     int l_type;
6951 
6952     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6953         return -TARGET_EFAULT;
6954     }
6955 
6956     __get_user(l_type, &target_fl->l_type);
6957     l_type = target_to_host_flock(l_type);
6958     if (l_type < 0) {
6959         return l_type;
6960     }
6961     fl->l_type = l_type;
6962     __get_user(fl->l_whence, &target_fl->l_whence);
6963     __get_user(fl->l_start, &target_fl->l_start);
6964     __get_user(fl->l_len, &target_fl->l_len);
6965     __get_user(fl->l_pid, &target_fl->l_pid);
6966     unlock_user_struct(target_fl, target_flock_addr, 0);
6967     return 0;
6968 }
6969 
/*
 * Copy a host struct flock64 out to a guest struct target_flock64 at
 * target_flock_addr.  Returns 0 on success or -TARGET_EFAULT if the
 * guest memory is not writable.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* Translate the host lock type to the guest's F_RDLCK/F_WRLCK/... values */
    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6989 
/*
 * Emulate the fcntl(2) syscall for the guest.
 *
 * fd:  host file descriptor
 * cmd: guest TARGET_F_* command
 * arg: guest argument - either a plain value or a guest address of a
 *      struct, depending on cmd
 *
 * Returns the (guest-converted) fcntl result, or a negative
 * TARGET_E* errno.  Lock-related commands marshal struct flock
 * between guest and host layouts; flag and signal commands convert
 * bit masks and signal numbers between the two ABIs.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    /* Unsupported/unknown guest command: fail early. */
    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Read the guest's probe lock, query the host, write back result. */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        /* 64-bit (and open-file-description) variants use the flock64
           layout on the guest side. */
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Convert host O_* flag bits to the guest's encoding. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        /* Convert guest O_* flag bits to the host's encoding. */
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        /* arg is a guest pointer to struct f_owner_ex to fill in. */
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        /* arg is a guest pointer to struct f_owner_ex to read. */
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        /* Signal numbers differ between guest and host ABIs. */
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* Plain integer argument; passes through unchanged. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown to our table: forward the raw guest cmd to the host. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
7108 
7109 #ifdef USE_UID16
7110 
/*
 * Clamp a 32-bit uid to the range representable by the legacy 16-bit
 * uid syscalls; values above 65535 map to the overflow uid 65534.
 */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
7118 
/*
 * Clamp a 32-bit gid to the range representable by the legacy 16-bit
 * gid syscalls; values above 65535 map to the overflow gid 65534.
 */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
7126 
/*
 * Widen a 16-bit uid from a legacy syscall: the 16-bit "no change"
 * sentinel (0xffff) becomes the 32-bit sentinel -1; everything else
 * passes through unchanged.
 */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
7134 
/*
 * Widen a 16-bit gid from a legacy syscall: the 16-bit "no change"
 * sentinel (0xffff) becomes the 32-bit sentinel -1; everything else
 * passes through unchanged.
 */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}
7146 
7147 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7148 
7149 #else /* !USE_UID16 */
/* No 16-bit uid syscalls on this target: uids pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}
/* No 16-bit gid syscalls on this target: gids pass through unchanged. */
static inline int high2lowgid(int gid)
{
    return gid;
}
/* No 16-bit uid syscalls on this target: uids pass through unchanged. */
static inline int low2highuid(int uid)
{
    return uid;
}
/* No 16-bit gid syscalls on this target: gids pass through unchanged. */
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
7170 
7171 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7172 
7173 #endif /* USE_UID16 */
7174 
7175 /* We must do direct syscalls for setting UID/GID, because we want to
7176  * implement the Linux system call semantics of "change only for this thread",
7177  * not the libc/POSIX semantics of "change for all threads in process".
7178  * (See http://ewontfix.com/17/ for more details.)
7179  * We use the 32-bit version of the syscalls if present; if it is not
7180  * then either the host architecture supports 32-bit UIDs natively with
7181  * the standard syscall, or the 16-bit UID is the best we can do.
7182  */
7183 #ifdef __NR_setuid32
7184 #define __NR_sys_setuid __NR_setuid32
7185 #else
7186 #define __NR_sys_setuid __NR_setuid
7187 #endif
7188 #ifdef __NR_setgid32
7189 #define __NR_sys_setgid __NR_setgid32
7190 #else
7191 #define __NR_sys_setgid __NR_setgid
7192 #endif
7193 #ifdef __NR_setresuid32
7194 #define __NR_sys_setresuid __NR_setresuid32
7195 #else
7196 #define __NR_sys_setresuid __NR_setresuid
7197 #endif
7198 #ifdef __NR_setresgid32
7199 #define __NR_sys_setresgid __NR_setresgid32
7200 #else
7201 #define __NR_sys_setresgid __NR_setresgid
7202 #endif
7203 
7204 _syscall1(int, sys_setuid, uid_t, uid)
7205 _syscall1(int, sys_setgid, gid_t, gid)
7206 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7207 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7208 
/*
 * One-time initialisation of the syscall layer: register all struct
 * conversion descriptors with the thunk machinery and fix up the size
 * field of ioctl numbers whose payload size could only be computed at
 * runtime.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

/* Register every struct type listed in syscall_types.h with the thunk
   converter; STRUCT_SPECIAL entries supply a prebuilt descriptor. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                /* A size-to-patch entry must describe a pointer payload;
                   anything else is a table bug, so fail hard at startup. */
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7253 
7254 #ifdef TARGET_NR_truncate64
/*
 * Emulate truncate64: the 64-bit length arrives split across two
 * 32-bit guest registers (arg2/arg3).  On ABIs that require 64-bit
 * register pairs to be aligned, a padding register is inserted and
 * the pair is shifted to arg3/arg4.
 */
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
7266 #endif
7267 
7268 #ifdef TARGET_NR_ftruncate64
/*
 * Emulate ftruncate64: like target_truncate64 above, the 64-bit
 * length is split across two 32-bit guest registers, shifted by one
 * register on ABIs that align 64-bit register pairs.
 */
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
7280 #endif
7281 
7282 #if defined(TARGET_NR_timer_settime) || \
7283     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a guest struct target_itimerspec at target_addr into the host
 * *host_its, converting both embedded timespecs.
 * Returns 0 on success or -TARGET_EFAULT on an inaccessible guest
 * address.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7298 #endif
7299 
7300 #if defined(TARGET_NR_timer_settime64) || \
7301     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/*
 * Read a guest struct target__kernel_itimerspec (64-bit time_t
 * layout) at target_addr into the host *host_its.
 * Returns 0 on success or -TARGET_EFAULT on an inaccessible guest
 * address.
 */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7316 #endif
7317 
7318 #if ((defined(TARGET_NR_timerfd_gettime) || \
7319       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7320       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * Write the host *host_its out to a guest struct target_itimerspec at
 * target_addr, converting both embedded timespecs.
 * Returns 0 on success or -TARGET_EFAULT on an inaccessible guest
 * address.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
7334 #endif
7335 
7336 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7337       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7338       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/*
 * Write the host *host_its out to a guest struct
 * target__kernel_itimerspec (64-bit time_t layout) at target_addr.
 * Returns 0 on success or -TARGET_EFAULT on an inaccessible guest
 * address.
 */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
7354 #endif
7355 
7356 #if defined(TARGET_NR_adjtimex) || \
7357     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Read a guest struct target_timex at target_addr into the host
 * *host_tx, field by field (adjtimex/clock_adjtime input).
 * Returns 0 on success or -TARGET_EFAULT if the guest struct is not
 * readable.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7392 
/*
 * Write the host *host_tx out to a guest struct target_timex at
 * target_addr, field by field (adjtimex/clock_adjtime output).
 * Returns 0 on success or -TARGET_EFAULT if the guest struct is not
 * writable.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7427 #endif
7428 
7429 
7430 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest struct target__kernel_timex (64-bit time layout) at
 * target_addr into the host *host_tx.  The embedded time value is
 * converted separately via copy_from_user_timeval64 because its
 * layout differs from the rest of the struct.
 * Returns 0 on success or -TARGET_EFAULT if the guest struct is not
 * readable.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    /* The time member is converted on its own before the rest. */
    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7469 
/*
 * Write the host *host_tx out to a guest struct target__kernel_timex
 * (64-bit time layout) at target_addr.  The embedded time value is
 * converted separately via copy_to_user_timeval64.
 * Returns 0 on success or -TARGET_EFAULT if the guest struct is not
 * writable.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

   if (copy_to_user_timeval64(target_addr +
                              offsetof(struct target__kernel_timex, time),
                              &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7508 #endif
7509 
7510 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7511 #define sigev_notify_thread_id _sigev_un._tid
7512 #endif
7513 
7514 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7515                                                abi_ulong target_addr)
7516 {
7517     struct target_sigevent *target_sevp;
7518 
7519     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7520         return -TARGET_EFAULT;
7521     }
7522 
7523     /* This union is awkward on 64 bit systems because it has a 32 bit
7524      * integer and a pointer in it; we follow the conversion approach
7525      * used for handling sigval types in signal.c so the guest should get
7526      * the correct value back even if we did a 64 bit byteswap and it's
7527      * using the 32 bit integer.
7528      */
7529     host_sevp->sigev_value.sival_ptr =
7530         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7531     host_sevp->sigev_signo =
7532         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7533     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7534     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7535 
7536     unlock_user_struct(target_sevp, target_addr, 1);
7537     return 0;
7538 }
7539 
7540 #if defined(TARGET_NR_mlockall)
7541 static inline int target_to_host_mlockall_arg(int arg)
7542 {
7543     int result = 0;
7544 
7545     if (arg & TARGET_MCL_CURRENT) {
7546         result |= MCL_CURRENT;
7547     }
7548     if (arg & TARGET_MCL_FUTURE) {
7549         result |= MCL_FUTURE;
7550     }
7551 #ifdef MCL_ONFAULT
7552     if (arg & TARGET_MCL_ONFAULT) {
7553         result |= MCL_ONFAULT;
7554     }
7555 #endif
7556 
7557     return result;
7558 }
7559 #endif
7560 
7561 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7562      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7563      defined(TARGET_NR_newfstatat))
/*
 * Write a host struct stat out to the guest's stat64 layout at
 * target_addr.  On 32-bit Arm an extra runtime check selects the EABI
 * layout; otherwise the target's stat64 (or plain stat, for targets
 * without a separate 64-bit layout) is used.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is not
 * writable.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding/unwritten fields don't leak host data. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding/unwritten fields don't leak host data. */
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7636 #endif
7637 
7638 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a statx result out to the guest at target_addr.  Note host_stx
 * is already a struct target_statx (the layout is identical on both
 * sides); only byte order needs converting, field by field.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is not
 * writable.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so spare/unwritten fields don't leak host data. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7677 #endif
7678 
/*
 * Dispatch a raw (non-interruption-safe) futex syscall to whichever
 * host syscall variant exists: on 64-bit hosts time_t is always
 * 64-bit so plain futex is used; on 32-bit hosts prefer
 * futex_time64 when the host timespec carries a 64-bit tv_sec,
 * otherwise fall back to the legacy futex syscall.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Reached only when neither futex syscall exists on this host. */
    g_assert_not_reached();
}
7703 
/*
 * Like do_sys_futex() but through the safe_* wrappers (restartable
 * across guest signal delivery) and with the result converted to a
 * guest errno.  Returns -TARGET_ENOSYS if no futex syscall variant
 * exists on this host.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7728 
7729 /* ??? Using host futex calls even when target atomic operations
7730    are not really atomic probably breaks things.  However implementing
7731    futexes locally would make futexes shared between multiple processes
7732    tricky.  However they're probably useless because guest atomic
7733    operations won't work either.  */
7734 #if defined(TARGET_NR_futex)
7735 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7736                     target_ulong timeout, target_ulong uaddr2, int val3)
7737 {
7738     struct timespec ts, *pts;
7739     int base_op;
7740 
7741     /* ??? We assume FUTEX_* constants are the same on both host
7742        and target.  */
7743 #ifdef FUTEX_CMD_MASK
7744     base_op = op & FUTEX_CMD_MASK;
7745 #else
7746     base_op = op;
7747 #endif
7748     switch (base_op) {
7749     case FUTEX_WAIT:
7750     case FUTEX_WAIT_BITSET:
7751         if (timeout) {
7752             pts = &ts;
7753             target_to_host_timespec(pts, timeout);
7754         } else {
7755             pts = NULL;
7756         }
7757         return do_safe_futex(g2h(cpu, uaddr),
7758                              op, tswap32(val), pts, NULL, val3);
7759     case FUTEX_WAKE:
7760         return do_safe_futex(g2h(cpu, uaddr),
7761                              op, val, NULL, NULL, 0);
7762     case FUTEX_FD:
7763         return do_safe_futex(g2h(cpu, uaddr),
7764                              op, val, NULL, NULL, 0);
7765     case FUTEX_REQUEUE:
7766     case FUTEX_CMP_REQUEUE:
7767     case FUTEX_WAKE_OP:
7768         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7769            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7770            But the prototype takes a `struct timespec *'; insert casts
7771            to satisfy the compiler.  We do not need to tswap TIMEOUT
7772            since it's not compared to guest memory.  */
7773         pts = (struct timespec *)(uintptr_t) timeout;
7774         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7775                              (base_op == FUTEX_CMP_REQUEUE
7776                               ? tswap32(val3) : val3));
7777     default:
7778         return -TARGET_ENOSYS;
7779     }
7780 }
7781 #endif
7782 
7783 #if defined(TARGET_NR_futex_time64)
7784 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7785                            int val, target_ulong timeout,
7786                            target_ulong uaddr2, int val3)
7787 {
7788     struct timespec ts, *pts;
7789     int base_op;
7790 
7791     /* ??? We assume FUTEX_* constants are the same on both host
7792        and target.  */
7793 #ifdef FUTEX_CMD_MASK
7794     base_op = op & FUTEX_CMD_MASK;
7795 #else
7796     base_op = op;
7797 #endif
7798     switch (base_op) {
7799     case FUTEX_WAIT:
7800     case FUTEX_WAIT_BITSET:
7801         if (timeout) {
7802             pts = &ts;
7803             if (target_to_host_timespec64(pts, timeout)) {
7804                 return -TARGET_EFAULT;
7805             }
7806         } else {
7807             pts = NULL;
7808         }
7809         return do_safe_futex(g2h(cpu, uaddr), op,
7810                              tswap32(val), pts, NULL, val3);
7811     case FUTEX_WAKE:
7812         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7813     case FUTEX_FD:
7814         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7815     case FUTEX_REQUEUE:
7816     case FUTEX_CMP_REQUEUE:
7817     case FUTEX_WAKE_OP:
7818         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7819            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7820            But the prototype takes a `struct timespec *'; insert casts
7821            to satisfy the compiler.  We do not need to tswap TIMEOUT
7822            since it's not compared to guest memory.  */
7823         pts = (struct timespec *)(uintptr_t) timeout;
7824         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7825                              (base_op == FUTEX_CMP_REQUEUE
7826                               ? tswap32(val3) : val3));
7827     default:
7828         return -TARGET_ENOSYS;
7829     }
7830 }
7831 #endif
7832 
7833 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement name_to_handle_at(2): resolve a guest pathname into an
 * opaque file handle plus mount id, writing the result back to guest
 * memory with the two 32-bit header fields byte-swapped.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* The guest's struct file_handle begins with handle_bytes. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /*
     * NOTE(review): size is guest-controlled; sizeof() + size could wrap
     * for a huge handle_bytes value -- confirm lock_user() rejects it.
     */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Host-side scratch handle; queried first, then copied to the guest. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    /* mount_id is written back regardless of the syscall's result. */
    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7885 #endif
7886 
7887 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement open_by_handle_at(2): rebuild a host file_handle from the
 * guest's copy (byte-swapping handle_type) and open it relative to
 * mount_fd with host-converted open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* The guest's struct file_handle begins with handle_bytes. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the opaque handle bytes; fix up the two header fields. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7919 #endif
7920 
7921 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7922 
/*
 * Implement signalfd4(2): convert the guest signal mask and flags to
 * host format, create the signalfd, and register an fd translator so
 * reads return target-format signalfd_siginfo structures.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only SFD_NONBLOCK and SFD_CLOEXEC equivalents are accepted. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd need host->target siginfo translation. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7950 #endif
7951 
/*
 * Translate a host wait-status word for the wait family of syscalls:
 * only the embedded signal number needs remapping; every other status
 * bit is assumed to have the same layout on host and target.
 */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        /* Keep the core-dump and exit bits, swap in the target signal. */
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        /* Stop signal lives in bits 8-15; low byte is the 0x7f marker. */
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
7965 
7966 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7967 {
7968     CPUState *cpu = env_cpu(cpu_env);
7969     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7970     int i;
7971 
7972     for (i = 0; i < bprm->argc; i++) {
7973         size_t len = strlen(bprm->argv[i]) + 1;
7974 
7975         if (write(fd, bprm->argv[i], len) != len) {
7976             return -1;
7977         }
7978     }
7979 
7980     return 0;
7981 }
7982 
/*
 * Emulate /proc/self/maps: walk the host process's own mappings and
 * emit only the ranges that correspond to valid guest addresses,
 * translated to guest addresses and guest page protections.
 */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last host address a guest can map. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip ranges the guest page table does not fully cover. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the path starts at a fixed column. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
#endif

    return 0;
}
8044 
/*
 * Emulate /proc/self/stat: emit the 44 space-separated fields, filling
 * in only the ones QEMU can supply (pid, comm, ppid, starttime, stack
 * bottom) and writing 0 for everything else.
 */
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            /* %.15s truncates the basename to fit the comm field */
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
8082 
/*
 * Emulate /proc/self/auxv: copy the guest's auxiliary vector (saved on
 * the target stack at exec time) out to the file descriptor.
 */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Rewind so the caller's subsequent reads start at the top. */
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr/len were advanced by the loop above, so
         * unlock_user() gets shifted values -- confirm this is benign.
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8112 
/*
 * Return 1 if filename names ENTRY under /proc for the current process,
 * either via "/proc/self/<entry>" or "/proc/<own-pid>/<entry>";
 * 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, self_prefix, strlen(self_prefix)) == 0) {
        filename += strlen(self_prefix);
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric component only matches QEMU's own pid. */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(filename, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        filename += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
8136 
8137 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8138     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact whole-path matcher used for emulated /proc entries. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8143 #endif
8144 
8145 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8146 static int open_net_route(CPUArchState *cpu_env, int fd)
8147 {
8148     FILE *fp;
8149     char *line = NULL;
8150     size_t len = 0;
8151     ssize_t read;
8152 
8153     fp = fopen("/proc/net/route", "r");
8154     if (fp == NULL) {
8155         return -1;
8156     }
8157 
8158     /* read header */
8159 
8160     read = getline(&line, &len, fp);
8161     dprintf(fd, "%s", line);
8162 
8163     /* read routes */
8164 
8165     while ((read = getline(&line, &len, fp)) != -1) {
8166         char iface[16];
8167         uint32_t dest, gw, mask;
8168         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8169         int fields;
8170 
8171         fields = sscanf(line,
8172                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8173                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8174                         &mask, &mtu, &window, &irtt);
8175         if (fields != 11) {
8176             continue;
8177         }
8178         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8179                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8180                 metric, tswap32(mask), mtu, window, irtt);
8181     }
8182 
8183     free(line);
8184     fclose(fp);
8185 
8186     return 0;
8187 }
8188 #endif
8189 
8190 #if defined(TARGET_SPARC)
8191 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8192 {
8193     dprintf(fd, "type\t\t: sun4u\n");
8194     return 0;
8195 }
8196 #endif
8197 
8198 #if defined(TARGET_HPPA)
8199 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8200 {
8201     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8202     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8203     dprintf(fd, "capabilities\t: os32\n");
8204     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8205     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8206     return 0;
8207 }
8208 #endif
8209 
8210 #if defined(TARGET_M68K)
8211 static int open_hardware(CPUArchState *cpu_env, int fd)
8212 {
8213     dprintf(fd, "Model:\t\tqemu-m68k\n");
8214     return 0;
8215 }
8216 #endif
8217 
/*
 * Common implementation for open(2)/openat(2).  /proc paths that must
 * reflect the guest rather than QEMU itself (maps, stat, auxv, cmdline,
 * plus some arch-specific entries) are emulated by generating their
 * contents into an unlinked temporary file and returning that fd;
 * everything else is passed through to the host's openat().
 */
static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;   /* entry name (or full path, see cmp) */
        int (*fill)(CPUArchState *cpu_env, int fd);  /* content writer */
        int (*cmp)(const char *s1, const char *s2);  /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: hand back the exec fd or reopen the guest binary. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately; the open fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill callback's errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the generated contents from the top. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
8283 
8284 #define TIMER_MAGIC 0x0caf0000
8285 #define TIMER_MAGIC_MASK 0xffff0000
8286 
8287 /* Convert QEMU provided timer ID back to internal 16bit index format */
8288 static target_timer_t get_timer_id(abi_long arg)
8289 {
8290     target_timer_t timerid = arg;
8291 
8292     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8293         return -TARGET_EINVAL;
8294     }
8295 
8296     timerid &= 0xffff;
8297 
8298     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8299         return -TARGET_EINVAL;
8300     }
8301 
8302     return timerid;
8303 }
8304 
/*
 * Convert a guest cpu_set_t style bitmask (target_size bytes of
 * abi_ulong words in guest byte order at target_addr) into the host's
 * unsigned long bitmask.  Returns 0 or -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    /* Re-pack bit by bit: guest and host word sizes may differ. */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
8338 
/*
 * Convert a host unsigned long bitmask into a guest cpu_set_t style
 * bitmask (target_size bytes of abi_ulong words in guest byte order at
 * target_addr).  Returns 0 or -TARGET_EFAULT.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    /* Re-pack bit by bit: guest and host word sizes may differ. */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
8371 
8372 #ifdef TARGET_NR_getdents
/*
 * Implement getdents(2) in terms of the host's getdents64 (or getdents
 * where EMULATE_GETDENTS_WITH_GETDENTS is set), repacking each host
 * record into the target's struct target_dirent layout in guest memory
 * at arg2.  Returns the number of bytes stored in the guest buffer, or
 * a negative target errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    /* Host-side bounce buffer, same size the guest offered. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2 = NUL terminator plus the trailing d_type byte. */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8457 #endif /* TARGET_NR_getdents */
8458 
8459 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Implement getdents64(2): fetch host dirents into a bounce buffer and
 * repack them as target_dirent64 records (fields byte-swapped) in the
 * guest buffer at arg2.  Returns the number of bytes stored, or a
 * negative target errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    /* Host-side bounce buffer, same size the guest offered. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen includes the NUL terminator here. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8523 #endif /* TARGET_NR_getdents64 */
8524 
8525 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8526 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8527 #endif
8528 
8529 /* This is an internal helper for do_syscall so that it is easier
8530  * to have a single return point, so that actions, such as logging
8531  * of syscall results, can be performed.
8532  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8533  */
8534 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8535                             abi_long arg2, abi_long arg3, abi_long arg4,
8536                             abi_long arg5, abi_long arg6, abi_long arg7,
8537                             abi_long arg8)
8538 {
8539     CPUState *cpu = env_cpu(cpu_env);
8540     abi_long ret;
8541 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8542     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8543     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8544     || defined(TARGET_NR_statx)
8545     struct stat st;
8546 #endif
8547 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8548     || defined(TARGET_NR_fstatfs)
8549     struct statfs stfs;
8550 #endif
8551     void *p;
8552 
8553     switch(num) {
8554     case TARGET_NR_exit:
8555         /* In old applications this may be used to implement _exit(2).
8556            However in threaded applications it is used for thread termination,
8557            and _exit_group is used for application termination.
8558            Do thread termination if we have more then one thread.  */
8559 
8560         if (block_signals()) {
8561             return -QEMU_ERESTARTSYS;
8562         }
8563 
8564         pthread_mutex_lock(&clone_lock);
8565 
8566         if (CPU_NEXT(first_cpu)) {
8567             TaskState *ts = cpu->opaque;
8568 
8569             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8570             object_unref(OBJECT(cpu));
8571             /*
8572              * At this point the CPU should be unrealized and removed
8573              * from cpu lists. We can clean-up the rest of the thread
8574              * data without the lock held.
8575              */
8576 
8577             pthread_mutex_unlock(&clone_lock);
8578 
8579             if (ts->child_tidptr) {
8580                 put_user_u32(0, ts->child_tidptr);
8581                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8582                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8583             }
8584             thread_cpu = NULL;
8585             g_free(ts);
8586             rcu_unregister_thread();
8587             pthread_exit(NULL);
8588         }
8589 
8590         pthread_mutex_unlock(&clone_lock);
8591         preexit_cleanup(cpu_env, arg1);
8592         _exit(arg1);
8593         return 0; /* avoid warning */
8594     case TARGET_NR_read:
8595         if (arg2 == 0 && arg3 == 0) {
8596             return get_errno(safe_read(arg1, 0, 0));
8597         } else {
8598             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8599                 return -TARGET_EFAULT;
8600             ret = get_errno(safe_read(arg1, p, arg3));
8601             if (ret >= 0 &&
8602                 fd_trans_host_to_target_data(arg1)) {
8603                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8604             }
8605             unlock_user(p, arg2, ret);
8606         }
8607         return ret;
8608     case TARGET_NR_write:
8609         if (arg2 == 0 && arg3 == 0) {
8610             return get_errno(safe_write(arg1, 0, 0));
8611         }
8612         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8613             return -TARGET_EFAULT;
8614         if (fd_trans_target_to_host_data(arg1)) {
8615             void *copy = g_malloc(arg3);
8616             memcpy(copy, p, arg3);
8617             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8618             if (ret >= 0) {
8619                 ret = get_errno(safe_write(arg1, copy, ret));
8620             }
8621             g_free(copy);
8622         } else {
8623             ret = get_errno(safe_write(arg1, p, arg3));
8624         }
8625         unlock_user(p, arg2, 0);
8626         return ret;
8627 
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* open() is emulated as openat(AT_FDCWD, ...); open flags are
         * converted from target to host bit layout. */
        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        /* Clear any fd translator left registered for this fd number
         * by a previously closed descriptor. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
    case TARGET_NR_close:
        /* Drop any fd translator before the fd number can be reused. */
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));
8662 
    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        /* fork is clone with SIGCHLD and no flags/stack/ptid/tls/ctid. */
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
            /* Copy the status out only when a child was actually
             * reaped (ret != 0) and the caller passed a pointer. */
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            /* si_pid left at 0 afterwards means no state change was
             * reported (e.g. WNOHANG); skip the siginfo copy-out. */
            info.si_pid = 0;
            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        {
            void * p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            /* Both unlocks run unconditionally; the pattern here relies
             * on unlock_user() accepting the NULL from a failed lock. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
        {
            void * p2 = NULL;
            /* NULL path pointers are rejected up front. */
            if (!arg2 || !arg4)
                return -TARGET_EFAULT;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;

            /* First pass: walk the guest argv/envp pointer arrays just
             * to count entries, so the host-side arrays can be sized. */
            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                argc++;
            }
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                envc++;
            }

            /* +1 for the terminating NULL entry; g_new0 zero-fills, so
             * the cleanup loops below can stop at the first NULL even
             * if we bail out part-way through locking. */
            argp = g_new0(char *, argc + 1);
            envp = g_new0(char *, envc + 1);

            /* Second pass: lock each guest string into host memory. */
            for (gp = guest_argp, q = argp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            /* Although execve() is not an interruptible syscall it is
             * a special case where we must use the safe_syscall wrapper:
             * if we allow a signal to happen before we make the host
             * syscall then we will 'lose' it, because at the point of
             * execve the process leaves QEMU's control. So we use the
             * safe syscall wrapper to ensure that we either take the
             * signal as a guest signal, or else it does not happen
             * before the execve completes and makes it the other
             * program's problem.
             */
            ret = get_errno(safe_execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            /* Unlock every string that was successfully locked; the
             * zero-filled arrays make *q == NULL the end marker. */
            for (gp = guest_argp, q = argp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }

            g_free(argp);
            g_free(envp);
        }
        return ret;
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            /* arg1 may be NULL; only store the result if a pointer was
             * supplied. */
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        /* Alpha's getxpid returns the pid in v0 and the parent pid in
         * the a4 register. */
        cpu_env->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            /* The source (arg1) and fstype (arg3) strings are optional;
             * the target path (arg2) is mandatory. */
            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            /* stime() is implemented via clock_settime(CLOCK_REALTIME)
             * with a whole-second timespec. */
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* No get_errno(): the return value is the remaining-seconds
         * count of any previous alarm, not an error code. */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        /* pause() always reports EINTR once a signal is taken. */
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            /* A NULL times pointer means "set to current time". */
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                /* tv[0] = access time, tv[1] = modification time. */
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            /* path() applies QEMU's guest-path prefix remapping. */
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        /* Kernel faccessat takes no flags argument, hence the 0. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync() cannot fail. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Signal numbers differ between targets; translate first. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                /* arg5 carries the renameat2 flags. */
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Propagate any fd translator to the duplicated fd. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                /* Convert each clock_t field to target units/byte order. */
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            /* The syscall's return value itself is a clock_t tick count
             * and also needs conversion. */
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        /* A NULL filename turns process accounting off. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* On success the new fd is arg2; carry over any translator. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /* The only flag dup3() accepts is O_CLOEXEC.  Reject anything
         * else with the *target* errno value: returning raw host
         * -EINVAL (as this case previously did) is inconsistent with
         * every other case in this switch and wrong on targets whose
         * EINVAL differs from the host's.
         */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* On success the new fd is arg2; carry over any translator. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_MIPS)
            /* MIPS uses the full target_sigaction layout even for the
             * old sigaction syscall; only sig[0] of the mask is used. */
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		/* Upper mask words are not used by old sigaction. */
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            /* Generic path: convert between the old (single-word mask)
             * sigaction layout and the internal target_sigaction. */
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel requires the exact sigset size. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                /* act is still locked here; fall through so the final
                 * unlock below runs. */
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Return the current blocked-signal mask in the old
             * single-word format. */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Replace the blocked-signal mask, returning the old one
             * in the single-word format. */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha's sigprocmask passes the mask by value and returns
             * the old mask as the syscall result. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic path: mask passed/returned through memory. */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* NULL new-set pointer: query only; 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* The kernel requires the exact sigset size. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* NULL new-set pointer: query only; 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
9503 #ifdef TARGET_NR_sigpending
9504     case TARGET_NR_sigpending:
9505         {
9506             sigset_t set;
9507             ret = get_errno(sigpending(&set));
9508             if (!is_error(ret)) {
9509                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9510                     return -TARGET_EFAULT;
9511                 host_to_target_old_sigset(p, &set);
9512                 unlock_user(p, arg1, sizeof(target_sigset_t));
9513             }
9514         }
9515         return ret;
9516 #endif
    /* rt_sigpending(set, sigsetsize): like sigpending but the caller
     * supplies the sigset size (arg2), which is validated below. */
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(&set, p);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    /* Old-style sigsuspend: atomically replace the signal mask and wait
     * for a signal.  Alpha passes the mask by value in arg1; everybody
     * else passes a pointer to a target_sigset_t. */
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            TaskState *ts = cpu->opaque;
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            /* safe_ wrapper so a pending guest signal interrupts the wait */
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    /* rt_sigsuspend(mask, sigsetsize): as sigsuspend, with caller-supplied
     * sigset size (arg2) validated inside process_sigsuspend_mask(). */
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t *set;

            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    /* rt_sigtimedwait(set, info, timeout, sigsetsize): wait for a signal
     * in 'set' with an optional (32-bit timespec) timeout; on success the
     * converted siginfo is written back to arg2 and the host signal number
     * is translated to the target numbering. */
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL timeout == wait forever */
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* ret is the delivered host signal number on success */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    /* Identical to rt_sigtimedwait above, except the timeout is a
     * 64-bit timespec (time64 ABI variant for 32-bit targets). */
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    /* rt_sigqueueinfo(pid, sig, info): queue a signal with caller-supplied
     * siginfo to a process; the target siginfo is converted before the
     * raw syscall is issued. */
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    /* rt_tgsigqueueinfo(tgid, tid, sig, info): same, addressed to a
     * specific thread within a thread group. */
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    /* Return from an old-style signal handler; signals must be blocked
     * while the saved context is restored, hence block_signals() first. */
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    /* Return from an rt signal handler (same blocking requirement). */
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    /* sethostname(name, len): pass the guest string straight through. */
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    /* setrlimit(resource, rlim): convert the target rlimit and apply it,
     * except for memory-related limits which are deliberately ignored
     * (see comment below). */
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Pretend success for AS/DATA/STACK without limiting */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    /* getrlimit(resource, rlim): query host limit and convert back. */
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    /* getrusage(who, usage): host call plus struct conversion. */
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    /* gettimeofday(tv, tz): both output pointers are optional (NULL-able). */
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    /* settimeofday(tv, tz): both input pointers are optional; only the
     * ones provided by the guest are converted and passed through. */
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    /* select: three historical ABI flavours, chosen at build time per
     * target architecture. */
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* old_select: single pointer to an argument block */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    /* time64 variant: last flag selects 64-bit timespec handling */
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    /* symlink(target, linkpath): both strings are locked before the check
     * so the unlock calls below are valid on every path. */
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            /* unlock_user is a no-op on NULL, so this is safe either way */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    /* symlinkat(target, newdirfd, linkpath) */
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    /* readlink(path, buf, bufsiz): intercepts /proc/self/exe so the guest
     * sees the emulated binary's path rather than QEMU's. */
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    /* readlinkat: same /proc/self/exe interception as readlink above.
     * NOTE(review): this branch uses snprintf (NUL-terminates/truncates)
     * while readlink uses a bare memcpy and clamps ret with MIN; the two
     * paths differ subtly on short buffers — looks inconsistent, verify
     * against kernel readlink(2) semantics. */
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real) ;
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    /* swapon(path, swapflags) */
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    /* reboot(magic1, magic2, cmd, arg): arg4 is only a valid string for
     * RESTART2; the kernel ignores it for every other command. */
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    /* mmap: some 32-bit ABIs pass a pointer to a 6-element argument
     * block (old_mmap) instead of six registers. */
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            /* addr, len, prot, flags, fd, offset — byte-swapped as needed */
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    /* mmap2: offset (arg6) is in MMAP_SHIFT-sized units (pages by default) */
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    /* munmap: strip any pointer tag (e.g. MTE) before translating */
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Extend the range down to the stack limit ourselves,
                 * since the host may not honour PROT_GROWSDOWN. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    /* g2h(): translate the guest address to the host mapping directly */
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    /* truncate(path, length) */
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    /* fchmodat(dirfd, path, mode): flags argument forced to 0 */
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    /* statfs family: statfs/fstatfs share the convert_statfs label and
     * statfs64/fstatfs64 share convert_statfs64, so these four cases must
     * stay together.  Each converts the host struct statfs field-by-field
     * into the target layout. */
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    /* statfs64: note the output buffer is arg3 (arg2 is the size arg) */
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    /* Socket-family syscalls: each is a thin dispatch to a do_* helper
     * that handles address/iovec conversion; targets without a separate
     * syscall multiplex through socketcall instead. */
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    /* accept == accept4 with flags 0 */
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    /* recv == recvfrom with NULL source address */
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    /* final flag selects send (1) vs recv (0) in the shared helper */
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    /* send == sendto with NULL destination address */
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    /* getrandom(buf, buflen, flags): only when the host kernel has it */
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    /* syslog(type, bufp, len): kernel log control.  For the READ actions
     * the guest buffer (arg2) is locked and the host syscall is issued
     * with the guest's length (arg3).
     * NOTE(review): 'len' is initialized from arg2, which per syslog(2)
     * is the buffer *pointer* — the length is arg3, which is what is
     * actually passed to sys_syslog below.  The len<0/len==0 checks thus
     * appear to test the wrong argument; verify against kernel semantics
     * before relying on the validation here. */
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    /* setitimer(which, new_value, old_value): itimerval holds two
     * timevals (it_interval, it_value) stored back to back in guest
     * memory, hence the arg + sizeof(struct target_timeval) offsets. */
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                /* NULL new_value: just fetch the old timer */
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    /* getitimer(which, curr_value) */
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    /* stat/lstat/fstat share the do_stat label below, which converts the
     * host struct stat (in the outer-scope 'st') to the target layout at
     * arg2 — these three cases must stay together. */
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* zero first so padding/unset fields don't leak host data */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* nanosecond timestamps only when both host and target have them */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-dispatch with the number taken from arg1
         * (masked to 16 bits) and the remaining args shifted down one. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* Only ask the host for rusage if the guest wants it. */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Copy status back only when a child was actually reaped
                 * (ret != 0; WNOHANG may return 0 with no status). */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        /* Lock the guest pathname, hand it straight to the host syscall. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            /* Field-by-field copy-out with byte swapping into the
             * target's struct sysinfo layout. */
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
/* SysV IPC: architectures either multiplex everything through the single
 * ipc() syscall or expose the individual sem/msg/shm calls; conversion of
 * guest arguments is done in the do_* helpers. */
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop is semtimedop with no timeout. */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        /* time64 variant: last flag selects 64-bit timespec conversion. */
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        /* BACKWARDS2 also swaps flags and newsp. */
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Flush gdbstub/plugin state before the whole thread group exits. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            /* Copy back even on error so the guest sees a defined buffer. */
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    /* vm86 mode only exists for 32-bit x86 guests. */
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* struct timex is read-modify-write: convert in, call, convert
             * the (possibly updated) buffer back out. */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            /* Same as clock_adjtime but with 64-bit time fields on the
             * guest side. */
            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* Host has no _llseek (64-bit hosts): emulate with plain
             * lseek on the combined 64-bit offset. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success, store the resulting 64-bit offset at *arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
/* poll/ppoll share one helper; the bool flags select ppoll semantics
 * (sigmask + timespec) and 64-bit time handling respectively. */
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* lock_iovec validates and maps the whole guest iovec; on
             * failure it sets errno for us to translate. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* Reassemble the 64-bit file offset from the ABI's
                 * low/high register pair. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest's byte count up to a whole host ulong. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Convert the guest CPU mask into host ulong layout. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* NULL-out the pointers the guest didn't supply; the third
             * (tcache) argument is unused by modern kernels. */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            /* Reject a NULL param pointer up front, matching the kernel. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* Cap the requested size at what we can actually fill. */
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                /* util_min/max only exist if the kernel reported a size
                 * large enough to include them (uclamp support). */
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* First read the guest's self-declared struct size. */
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            /* size == 0 means "the original v1 layout" (no uclamp fields). */
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                /* Too small to be any known version: report the size we
                 * support and fail E2BIG, as the kernel does. */
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            /* Any bytes past the struct we understand must be zero,
             * otherwise the guest is using a newer ABI than we support. */
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Copy the RR quantum out as a 32/64-bit target timespec. */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /* Previously the conversion result was ignored, so a bad
             * guest pointer slept on an uninitialized timespec; fail
             * with EFAULT instead, as the kernel does. */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                /* On interruption, copy the remaining time back to the
                 * guest; report EFAULT if its buffer is unwritable. */
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
10954     case TARGET_NR_prctl:
10955         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10956         break;
10957 #ifdef TARGET_NR_arch_prctl
10958     case TARGET_NR_arch_prctl:
10959         return do_arch_prctl(cpu_env, arg1, arg2);
10960 #endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass 64-bit values in aligned register pairs; shift
         * the offset halves down if so. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back only the bytes actually read. */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        /* ret is the string length written (or an error); copy that much. */
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        /* The header is locked for write because the kernel updates its
         * version field for both capget and capset. */
        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            /* capset: marshal the guest capability sets into host order. */
            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* capget: copy the returned capability sets back out. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        /* sendfile takes an abi_long offset; sendfile64 below takes s64. */
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            /* Write the updated offset back to the guest. */
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
11122 #ifdef TARGET_NR_vfork
11123     case TARGET_NR_vfork:
11124         return get_errno(do_fork(cpu_env,
11125                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11126                          0, 0, 0, 0));
11127 #endif
11128 #ifdef TARGET_NR_ugetrlimit
11129     case TARGET_NR_ugetrlimit:
11130     {
11131 	struct rlimit rlim;
11132 	int resource = target_to_host_resource(arg1);
11133 	ret = get_errno(getrlimit(resource, &rlim));
11134 	if (!is_error(ret)) {
11135 	    struct target_rlimit *target_rlim;
11136             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11137                 return -TARGET_EFAULT;
11138 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11139 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11140             unlock_user_struct(target_rlim, arg2, 1);
11141 	}
11142         return ret;
11143     }
11144 #endif
11145 #ifdef TARGET_NR_truncate64
11146     case TARGET_NR_truncate64:
11147         if (!(p = lock_user_string(arg1)))
11148             return -TARGET_EFAULT;
11149 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11150         unlock_user(p, arg1, 0);
11151         return ret;
11152 #endif
11153 #ifdef TARGET_NR_ftruncate64
11154     case TARGET_NR_ftruncate64:
11155         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11156 #endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        /* Host stat(), then convert into the target's stat64 layout. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        /* As stat64, but does not follow a final symlink. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        /* As stat64, but on an open fd. */
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat(dirfd, path, buf, flags); both syscall numbers share
         * the same argument layout and host implementation.
         */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Anything other than ENOSYS is final; ENOSYS means the
                 * host kernel predates statx, so fall through to the
                 * fstatat-based emulation below.
                 */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: use fstatat and synthesize the statx fields that
             * can be derived from struct stat; the rest stay zeroed.
             */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                /* Only whole-second timestamps are available here;
                 * tv_nsec fields stay zero from the memset above.
                 */
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
/* Legacy UID/GID syscalls: low2high*/
/* and high2low* convert between the target's narrow (16-bit) id space
 * and the host's ids on targets that use the old-style calls.
 */
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11291     case TARGET_NR_getgroups:
11292         {
11293             int gidsetsize = arg1;
11294             target_id *target_grouplist;
11295             gid_t *grouplist;
11296             int i;
11297 
11298             grouplist = alloca(gidsetsize * sizeof(gid_t));
11299             ret = get_errno(getgroups(gidsetsize, grouplist));
11300             if (gidsetsize == 0)
11301                 return ret;
11302             if (!is_error(ret)) {
11303                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11304                 if (!target_grouplist)
11305                     return -TARGET_EFAULT;
11306                 for(i = 0;i < ret; i++)
11307                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11308                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11309             }
11310         }
11311         return ret;
11312     case TARGET_NR_setgroups:
11313         {
11314             int gidsetsize = arg1;
11315             target_id *target_grouplist;
11316             gid_t *grouplist = NULL;
11317             int i;
11318             if (gidsetsize) {
11319                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11320                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11321                 if (!target_grouplist) {
11322                     return -TARGET_EFAULT;
11323                 }
11324                 for (i = 0; i < gidsetsize; i++) {
11325                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11326                 }
11327                 unlock_user(target_grouplist, arg2, 0);
11328             }
11329             return get_errno(setgroups(gidsetsize, grouplist));
11330         }
    case TARGET_NR_fchown:
        /* Legacy-width chown on an open fd. */
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* fchownat(dirfd, path, uid, gid, flags) with legacy-width ids. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* Set real, effective and saved uids in one call. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            /* Fetch real/effective/saved uids and store each through the
             * three guest pointers; any faulting store yields EFAULT.
             */
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
/* Guard with the syscall actually implemented here: the old guard tested
 * TARGET_NR_getresgid, which only worked because every target defining
 * one also defines the other.
 */
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* Set real, effective and saved gids in one call. */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            /* Fetch real/effective/saved gids and store each through the
             * three guest pointers; any faulting store yields EFAULT.
             */
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        /* Legacy-width chown by path (follows symlinks). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid never fail; the return is the previous id. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));

/* 32-bit-id variants below pass ids through without the low2high/high2low
 * narrowing conversions.
 */
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            /* Alpha's getxuid returns the real uid in v0 (the normal
             * syscall return) and the effective uid in register a4.
             */
            uid_t euid;
            euid=geteuid();
            cpu_env->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            /* Alpha's getxgid returns the real gid in v0 (the normal
             * syscall return) and the effective gid in register a4.
             * Use gid_t here: the original declared this as uid_t.
             */
            gid_t egid;
            egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Anything not handled below reports EOPNOTSUPP, as the kernel
         * does for the unimplemented selectors.
         */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                /* Report the software FP control word: the trap-enable and
                 * mapping bits kept in env->swcr, with the live status
                 * bits refreshed from the hardware FPCR.
                 */
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Rebuild the FPCR: keep the dynamic rounding mode bits,
                 * then merge in the bits derived from the new SWCR.
                 */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    /* At least one newly-raised exception is unmasked:
                     * deliver SIGFPE with the most specific si_code.
                     * Later checks override earlier ones, so the order
                     * below establishes the priority.
                     */
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            /* OSF/1-style sigprocmask: the mask is passed by value in
             * arg2 and the previous mask is returned as the syscall
             * result rather than through a pointer.
             */
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif

#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        /* 32-bit-id variants: no legacy 16-bit narrowing. */
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* getgroups() with full 32-bit group ids. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* gidsetsize is guest-controlled and sizes a stack buffer:
             * reject anything outside [0, NGROUPS_MAX] before the
             * alloca(), matching the kernel's EINVAL behaviour.
             */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                /* Size query: report the group count, copy nothing out. */
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* setgroups() with full 32-bit group ids. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist = NULL;
            int i;

            /* Bound the guest-supplied size before using it for a stack
             * allocation; the kernel rejects sizes above NGROUPS_MAX
             * (and negative sizes) with EINVAL.
             */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            /* Only touch guest memory when there is something to read,
             * matching the TARGET_NR_setgroups handling above.
             */
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* 32-bit-id variants: ids pass through without narrowing. */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            /* Store real/effective/saved uids through the three guest
             * pointers; any faulting store yields EFAULT.
             */
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /* NOTE(review): arg3 is an output vector (one byte per page
             * of the range) but is locked with lock_user_string(), which
             * sizes the mapping from the current string length, and the
             * unlock length is 'ret' (the syscall status, 0 on success)
             * rather than the number of vector bytes written.  Looks
             * suspicious -- verify against the mincore(2) contract.
             */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error directly instead of setting
         * errno, hence no get_errno() here.
         */
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the arguments so that advice ends up last, matching the
         * posix_fadvise() call below; 'ret' is just a scratch temporary.
         */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants from the generic ABI;
         * remap them to the host's POSIX_FADV_* values.
         */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /* fcntl with 64-bit file locks: the lock commands need the
         * struct flock64 converted between guest and host layouts;
         * everything else is delegated to do_fcntl().
         */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM lays out struct flock64 differently from EABI. */
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            /* In/out: copy the probe lock in, then the result back out. */
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
	    break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            /* Input only: the kernel does not modify the lock here. */
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
	    break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the emulated target's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* The 64-bit offset arrives as a 32-bit register pair; some ABIs
         * align such pairs, leaving a gap after the fd argument.
         */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            /*
             * A NULL list buffer is valid (the syscall then only reports
             * the required size), so lock guest memory only when a
             * buffer pointer was supplied.
             */
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);   /* pathname */
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                /* llistxattr: do not follow a trailing symlink */
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        /* Copy the attribute-name list (up to arg3 bytes) back out. */
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                /* A zero-length/NULL value is legal; lock only if given. */
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);   /* pathname */
            n = lock_user_string(arg2);   /* attribute name */
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    /* lsetxattr: operate on the symlink itself */
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                /* NULL value buffer means "just report the size". */
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);   /* pathname */
            n = lock_user_string(arg2);   /* attribute name */
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    /* lgetxattr: do not follow a trailing symlink */
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* Copy the attribute value (up to arg4 bytes) back out. */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);   /* pathname */
            n = lock_user_string(arg2);   /* attribute name */
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    /* lremovexattr: do not follow a trailing symlink */
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      /* MIPS stores the TLS pointer in the CP0 UserLocal register. */
      cpu_env->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS: the low byte of the PID register must remain clear. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          cpu_env->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      /* 32-bit x86 uses GDT/LDT descriptor manipulation; see helper. */
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* m68k keeps the thread pointer in the per-thread TaskState. */
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not emulated; guests fall back on uname() etc. */
        return -TARGET_ENOSYS;
#endif
12092 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Convert the guest timespec layout before calling the host. */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* Same as clock_settime, but the guest uses the 64-bit layout. */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* NOTE(review): the copy-out result is ignored here, unlike
             * clock_gettime above, so a bad arg2 is not reported as
             * EFAULT -- confirm whether this is intentional. */
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* NOTE(review): copy-out result ignored, as above. */
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* ts doubles as the "remaining time" output buffer when arg4
         * (the guest rem pointer) is non-NULL. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        /* 64-bit-time variant of clock_nanosleep above. */
        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12202 
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* Pass the guest address through g2h() so the host kernel can
         * clear the futex word at thread exit. */
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        /* Signal numbers differ between guest and host ABIs. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12232 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                /* NULL times: per utimensat(2), both timestamps are set
                 * to the current time. */
                tsp = NULL;
            } else {
                /* Convert the two-element guest timespec array. */
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4))
;
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* 64-bit-time variant of utimensat above. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* Full futex emulation lives in do_futex(); it needs the CPU
         * handle to resolve guest addresses. */
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        /*
         * Create an inotify instance; on success register the fd
         * translator so that events read() back from it are converted
         * to the guest layout.
         */
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        /* As inotify_init, but with O_NONBLOCK/O_CLOEXEC-style flags
         * translated from the guest encoding. */
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        if (!p) {
            /*
             * Bug fix: the result of lock_user_string() was previously
             * passed to path() unchecked; fault on a bad guest pathname
             * pointer instead, matching the other pathname syscalls.
             */
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
12328 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                /* Optional queue attributes supplied by the guest. */
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /*
             * NOTE(review): the queue name is locked starting at
             * arg1 - 1, one byte before the guest pointer, while the
             * unlock below uses arg1 itself -- looks suspicious;
             * confirm the intent before changing.
             */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* NOTE(review): same arg1 - 1 oddity as mq_open in this case
         * group -- confirm before changing. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
12361 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* NOTE(review): the lock_user() result is not checked for
             * NULL before being passed to the host call -- verify. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* No timeout supplied: block indefinitely. */
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            /* 64-bit-time variant of mq_timedsend above. */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
12404 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /*
             * NOTE(review): the message buffer is locked with
             * VERIFY_READ even though the host call writes into it; the
             * copy-back happens via unlock_user() with length arg3.
             * Also the lock_user() result is not NULL-checked. Verify
             * both before changing.
             */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            /* Optionally report the received message's priority. */
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            /* 64-bit-time variant of mq_timedreceive above. */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
12458 
12459     /* Not implemented for now... */
12460 /*     case TARGET_NR_mq_notify: */
12461 /*         break; */
12462 
12463     case TARGET_NR_mq_getsetattr:
12464         {
12465             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12466             ret = 0;
12467             if (arg2 != 0) {
12468                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12469                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12470                                            &posix_mq_attr_out));
12471             } else if (arg3 != 0) {
12472                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12473             }
12474             if (ret == 0 && arg3 != 0) {
12475                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12476             }
12477         }
12478         return ret;
12479 #endif
12480 
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* fd-to-fd duplication; no guest memory is touched. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            /* Either offset pointer may be NULL (use the fd's offset). */
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* Write the (possibly advanced) offsets back to the guest. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() reports its failure through errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Register the fd translator so the 8-byte counter read back
         * from the fd is byte-swapped for the guest if needed. */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate only the O_NONBLOCK/O_CLOEXEC bits; all other flag
         * bits are passed through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABIs split each 64-bit offset/len across two regs. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts a pad register, shifting the arguments. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* Shared helper converts the guest sigset and flags. */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd == signalfd4 with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                /* Convert the guest epoll_event to the host layout. */
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
12652 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound the event count so the bounce allocations stay sane. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        /* Lock the guest's output array for the whole call. */
        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Host-side scratch array; events are converted on the way out. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            /* Optional temporary signal mask for the duration of the wait. */
            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; convert each one. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /*
         * New limits for RLIMIT_AS/DATA/STACK are deliberately not
         * forwarded to the host (rnewp stays NULL for them), so only
         * the old-limit query takes effect for those resources.
         */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Copy the host's hostname into the guest-supplied buffer. */
        char *buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!buf) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(buf, arg2));
        unlock_user(buf, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Compare-and-exchange on the u32 at guest address arg6: if it
         * equals arg2, store arg1 there; either way return the value read.
         * NOTE(review): not actually atomic w.r.t. other vCPUs; should
         * use start_exclusive from main.c.
         */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Bad guest address: raise SIGSEGV and bail out immediately.
             * The original code fell through here and returned the
             * uninitialized mem_value, which is undefined behavior. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Memory-barrier syscall.  Like the kernel implementation and
         * the qemu arm barrier, treat it as a no-op and just report
         * success. */
        return 0;
#endif
12801 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Reserve a free slot in the g_posix_timers table; its index is
         * encoded into the handle returned to the guest below. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                /* Convert the guest's sigevent; a NULL sevp keeps the
                 * host kernel's default notification behavior. */
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an opaque handle: magic bits or'ed with
                 * the table index (presumably decoded by get_timer_id --
                 * confirm). */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
12837 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id rejected the guest handle; propagate its error. */
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory for settime. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            /* Zero-init so hspec_old stays well-defined even if the host
             * call below fails before filling it in. */
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is copied out regardless of ret; safe because of
             * the zero-initialization above. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12865 
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* 64-bit-time_t variant of timer_settime; identical flow except
         * for the itimerspec conversion helpers. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            /* Zero-init so hspec_old is defined even if settime fails. */
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12891 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): hspec is copied out even when the host call
             * failed, in which case it is uninitialized -- confirm this
             * is the intended behavior. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12914 
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): as in timer_gettime, hspec is copied out even
             * on host failure, when it is uninitialized -- confirm. */
            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12937 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid -- report the timer's overrun count. */
        target_timer_t tid = get_timer_id(arg1);

        if (tid < 0) {
            ret = tid;
        } else {
            ret = get_errno(timer_getoverrun(g_posix_timers[tid]));
        }
        return ret;
    }
#endif
12953 
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid -- dispose of the timer. */
        target_timer_t tid = get_timer_id(arg1);

        if (tid < 0) {
            ret = tid;
        } else {
            /* Delete the host timer, then mark the table slot free. */
            ret = get_errno(timer_delete(g_posix_timers[tid]));
            g_posix_timers[tid] = 0;
        }
        return ret;
    }
#endif
12970 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Create a timerfd; the clockid passes straight through, only
         * the flags bitmask needs guest-to-host translation. */
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif
12976 
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* NOTE(review): its_curr is copied to the guest even when the
             * host call failed and left it uninitialized -- confirm. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12990 
#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* 64-bit-time_t variant; same caveat as timerfd_gettime about
             * copying its_curr out after a host failure. */
            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13004 
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* A NULL new_value is forwarded as-is to the host. */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* Copy the previous timer setting back if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13027 
#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* 64-bit-time_t variant of timerfd_settime; identical flow
             * except for the itimerspec conversion helpers. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13050 
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* I/O priority: which/who are plain integers, no translation. */
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* which/who/ioprio pass straight through to the host. */
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* Reassociate with the namespace referred to by fd arg1. */
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* Disassociate parts of the execution context (flags in arg1). */
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        /* Compare kernel resources of two processes; all arguments are
         * plain integers, so no conversion is required. */
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* Anonymous memory fd: the name string is copied in from guest
         * memory; the flags pass through unchanged. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* Presumably clears any stale fd translator registered for the
         * newly returned fd number -- confirm. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        /* cmd/flags are plain integers; forward directly. */
        return get_errno(membarrier(arg1, arg2));
#endif
13093 
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            /* Either offset pointer may be NULL (the fd's own file
             * position is used); otherwise read the 64-bit offset from
             * guest memory. */
            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            /* On a successful, non-zero copy, write the advanced offsets
             * back to the guest. */
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
13130 
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            /* Swap the root mount: both path strings must be copied in
             * from guest memory before calling the host. */
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (p && p2) {
                ret = get_errno(pivot_root(p, p2));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
13147 
13148     default:
13149         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13150         return -TARGET_ENOSYS;
13151     }
13152     return ret;
13153 }
13154 
13155 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13156                     abi_long arg2, abi_long arg3, abi_long arg4,
13157                     abi_long arg5, abi_long arg6, abi_long arg7,
13158                     abi_long arg8)
13159 {
13160     CPUState *cpu = env_cpu(cpu_env);
13161     abi_long ret;
13162 
13163 #ifdef DEBUG_ERESTARTSYS
13164     /* Debug-only code for exercising the syscall-restart code paths
13165      * in the per-architecture cpu main loops: restart every syscall
13166      * the guest makes once before letting it through.
13167      */
13168     {
13169         static bool flag;
13170         flag = !flag;
13171         if (flag) {
13172             return -QEMU_ERESTARTSYS;
13173         }
13174     }
13175 #endif
13176 
13177     record_syscall_start(cpu, num, arg1,
13178                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13179 
13180     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13181         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13182     }
13183 
13184     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13185                       arg5, arg6, arg7, arg8);
13186 
13187     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13188         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13189                           arg3, arg4, arg5, arg6);
13190     }
13191 
13192     record_syscall_return(cpu, num, ret);
13193     return ret;
13194 }
13195