xref: /openbmc/qemu/linux-user/syscall.c (revision 52f0c160)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
100 /*
101  * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
102  * which in turn prevents use of linux/fs.h. So we have to
103  * define the constants ourselves for now.
104  */
105 #define FS_IOC_GETFLAGS                _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS                _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION              _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION              _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION            _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION            _IOW('v', 2, int)
114 #else
115 #include <linux/fs.h>
116 #endif
117 #include <linux/fd.h>
118 #if defined(CONFIG_FIEMAP)
119 #include <linux/fiemap.h>
120 #endif
121 #include <linux/fb.h>
122 #if defined(CONFIG_USBFS)
123 #include <linux/usbdevice_fs.h>
124 #include <linux/usb/ch9.h>
125 #endif
126 #include <linux/vt.h>
127 #include <linux/dm-ioctl.h>
128 #include <linux/reboot.h>
129 #include <linux/route.h>
130 #include <linux/filter.h>
131 #include <linux/blkpg.h>
132 #include <netpacket/packet.h>
133 #include <linux/netlink.h>
134 #include <linux/if_alg.h>
135 #include <linux/rtc.h>
136 #include <sound/asound.h>
137 #ifdef HAVE_BTRFS_H
138 #include <linux/btrfs.h>
139 #endif
140 #ifdef HAVE_DRM_H
141 #include <libdrm/drm.h>
142 #include <libdrm/i915_drm.h>
143 #endif
144 #include "linux_loop.h"
145 #include "uname.h"
146 
147 #include "qemu.h"
148 #include "user-internals.h"
149 #include "strace.h"
150 #include "signal-common.h"
151 #include "loader.h"
152 #include "user-mmap.h"
153 #include "user/safe-syscall.h"
154 #include "qemu/guest-random.h"
155 #include "qemu/selfmap.h"
156 #include "user/syscall-trace.h"
157 #include "special-errno.h"
158 #include "qapi/error.h"
159 #include "fd-trans.h"
160 #include "tcg/tcg.h"
161 
162 #ifndef CLONE_IO
163 #define CLONE_IO                0x80000000      /* Clone io context */
164 #endif
165 
166 /* We can't directly call the host clone syscall, because this will
167  * badly confuse libc (breaking mutexes, for example). So we must
168  * divide clone flags into:
169  *  * flag combinations that look like pthread_create()
170  *  * flag combinations that look like fork()
171  *  * flags we can implement within QEMU itself
172  *  * flags we can't support and will return an error for
173  */
174 /* For thread creation, all these flags must be present; for
175  * fork, none must be present.
176  */
177 #define CLONE_THREAD_FLAGS                              \
178     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
179      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
180 
181 /* These flags are ignored:
182  * CLONE_DETACHED is now ignored by the kernel;
183  * CLONE_IO is just an optimisation hint to the I/O scheduler
184  */
185 #define CLONE_IGNORED_FLAGS                     \
186     (CLONE_DETACHED | CLONE_IO)
187 
188 /* Flags for fork which we can implement within QEMU itself */
189 #define CLONE_OPTIONAL_FORK_FLAGS               \
190     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
191      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
192 
193 /* Flags for thread creation which we can implement within QEMU itself */
194 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
195     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
196      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
197 
198 #define CLONE_INVALID_FORK_FLAGS                                        \
199     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
200 
201 #define CLONE_INVALID_THREAD_FLAGS                                      \
202     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
203        CLONE_IGNORED_FLAGS))
204 
205 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
206  * have almost all been allocated. We cannot support any of
207  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
208  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
209  * The checks against the invalid thread masks above will catch these.
210  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
211  */
212 
213 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
214  * once. This exercises the codepaths for restart.
215  */
216 //#define DEBUG_ERESTARTSYS
217 
218 //#include <linux/msdos_fs.h>
219 #define VFAT_IOCTL_READDIR_BOTH \
220     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
221 #define VFAT_IOCTL_READDIR_SHORT \
222     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
223 
224 #undef _syscall0
225 #undef _syscall1
226 #undef _syscall2
227 #undef _syscall3
228 #undef _syscall4
229 #undef _syscall5
230 #undef _syscall6
231 
232 #define _syscall0(type,name)		\
233 static type name (void)			\
234 {					\
235 	return syscall(__NR_##name);	\
236 }
237 
238 #define _syscall1(type,name,type1,arg1)		\
239 static type name (type1 arg1)			\
240 {						\
241 	return syscall(__NR_##name, arg1);	\
242 }
243 
244 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
245 static type name (type1 arg1,type2 arg2)		\
246 {							\
247 	return syscall(__NR_##name, arg1, arg2);	\
248 }
249 
250 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
251 static type name (type1 arg1,type2 arg2,type3 arg3)		\
252 {								\
253 	return syscall(__NR_##name, arg1, arg2, arg3);		\
254 }
255 
256 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
257 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
258 {										\
259 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
260 }
261 
262 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
263 		  type5,arg5)							\
264 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
265 {										\
266 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
267 }
268 
269 
270 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
271 		  type5,arg5,type6,arg6)					\
272 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
273                   type6 arg6)							\
274 {										\
275 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
276 }
277 
278 
279 #define __NR_sys_uname __NR_uname
280 #define __NR_sys_getcwd1 __NR_getcwd
281 #define __NR_sys_getdents __NR_getdents
282 #define __NR_sys_getdents64 __NR_getdents64
283 #define __NR_sys_getpriority __NR_getpriority
284 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
285 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
286 #define __NR_sys_syslog __NR_syslog
287 #if defined(__NR_futex)
288 # define __NR_sys_futex __NR_futex
289 #endif
290 #if defined(__NR_futex_time64)
291 # define __NR_sys_futex_time64 __NR_futex_time64
292 #endif
293 #define __NR_sys_statx __NR_statx
294 
295 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
296 #define __NR__llseek __NR_lseek
297 #endif
298 
299 /* Newer kernel ports have llseek() instead of _llseek() */
300 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
301 #define TARGET_NR__llseek TARGET_NR_llseek
302 #endif
303 
304 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
305 #ifndef TARGET_O_NONBLOCK_MASK
306 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
307 #endif
308 
309 #define __NR_sys_gettid __NR_gettid
310 _syscall0(int, sys_gettid)
311 
312 /* For the 64-bit guest on 32-bit host case we must emulate
313  * getdents using getdents64, because otherwise the host
314  * might hand us back more dirent records than we can fit
315  * into the guest buffer after structure format conversion.
316  * Otherwise we emulate getdents with getdents if the host has it.
317  */
318 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
319 #define EMULATE_GETDENTS_WITH_GETDENTS
320 #endif
321 
322 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
323 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
324 #endif
325 #if (defined(TARGET_NR_getdents) && \
326       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
327     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
328 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
329 #endif
330 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
331 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
332           loff_t *, res, uint, wh);
333 #endif
334 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
335 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
336           siginfo_t *, uinfo)
337 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
338 #ifdef __NR_exit_group
339 _syscall1(int,exit_group,int,error_code)
340 #endif
341 #if defined(__NR_futex)
342 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_futex_time64)
346 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
347           const struct timespec *,timeout,int *,uaddr2,int,val3)
348 #endif
349 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
350 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
351           unsigned long *, user_mask_ptr);
352 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
353 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
354           unsigned long *, user_mask_ptr);
355 /* sched_attr is not defined in glibc */
356 struct sched_attr {
357     uint32_t size;
358     uint32_t sched_policy;
359     uint64_t sched_flags;
360     int32_t sched_nice;
361     uint32_t sched_priority;
362     uint64_t sched_runtime;
363     uint64_t sched_deadline;
364     uint64_t sched_period;
365     uint32_t sched_util_min;
366     uint32_t sched_util_max;
367 };
368 #define __NR_sys_sched_getattr __NR_sched_getattr
369 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
370           unsigned int, size, unsigned int, flags);
371 #define __NR_sys_sched_setattr __NR_sched_setattr
372 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
373           unsigned int, flags);
374 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
375 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
376 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
377 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
378           const struct sched_param *, param);
379 #define __NR_sys_sched_getparam __NR_sched_getparam
380 _syscall2(int, sys_sched_getparam, pid_t, pid,
381           struct sched_param *, param);
382 #define __NR_sys_sched_setparam __NR_sched_setparam
383 _syscall2(int, sys_sched_setparam, pid_t, pid,
384           const struct sched_param *, param);
385 #define __NR_sys_getcpu __NR_getcpu
386 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
387 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
388           void *, arg);
389 _syscall2(int, capget, struct __user_cap_header_struct *, header,
390           struct __user_cap_data_struct *, data);
391 _syscall2(int, capset, struct __user_cap_header_struct *, header,
392           struct __user_cap_data_struct *, data);
393 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
394 _syscall2(int, ioprio_get, int, which, int, who)
395 #endif
396 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
397 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
398 #endif
399 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
400 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
401 #endif
402 
403 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
404 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
405           unsigned long, idx1, unsigned long, idx2)
406 #endif
407 
408 /*
409  * It is assumed that struct statx is architecture independent.
410  */
411 #if defined(TARGET_NR_statx) && defined(__NR_statx)
412 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
413           unsigned int, mask, struct target_statx *, statxbuf)
414 #endif
415 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
416 _syscall2(int, membarrier, int, cmd, int, flags)
417 #endif
418 
419 static const bitmask_transtbl fcntl_flags_tbl[] = {
420   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
421   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
422   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
423   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
424   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
425   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
426   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
427   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
428   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
429   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
430   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
431   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
432   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
433 #if defined(O_DIRECT)
434   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
435 #endif
436 #if defined(O_NOATIME)
437   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
438 #endif
439 #if defined(O_CLOEXEC)
440   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
441 #endif
442 #if defined(O_PATH)
443   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
444 #endif
445 #if defined(O_TMPFILE)
446   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
447 #endif
448   /* Don't terminate the list prematurely on 64-bit host+guest.  */
449 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
450   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
451 #endif
452   { 0, 0, 0, 0 }
453 };
454 
455 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
456 
457 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
458 #if defined(__NR_utimensat)
459 #define __NR_sys_utimensat __NR_utimensat
460 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
461           const struct timespec *,tsp,int,flags)
462 #else
463 static int sys_utimensat(int dirfd, const char *pathname,
464                          const struct timespec times[2], int flags)
465 {
466     errno = ENOSYS;
467     return -1;
468 }
469 #endif
470 #endif /* TARGET_NR_utimensat */
471 
472 #ifdef TARGET_NR_renameat2
473 #if defined(__NR_renameat2)
474 #define __NR_sys_renameat2 __NR_renameat2
475 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
476           const char *, new, unsigned int, flags)
477 #else
478 static int sys_renameat2(int oldfd, const char *old,
479                          int newfd, const char *new, int flags)
480 {
481     if (flags == 0) {
482         return renameat(oldfd, old, newfd, new);
483     }
484     errno = ENOSYS;
485     return -1;
486 }
487 #endif
488 #endif /* TARGET_NR_renameat2 */
489 
490 #ifdef CONFIG_INOTIFY
491 #include <sys/inotify.h>
492 #else
493 /* Userspace can usually survive runtime without inotify */
494 #undef TARGET_NR_inotify_init
495 #undef TARGET_NR_inotify_init1
496 #undef TARGET_NR_inotify_add_watch
497 #undef TARGET_NR_inotify_rm_watch
498 #endif /* CONFIG_INOTIFY  */
499 
500 #if defined(TARGET_NR_prlimit64)
501 #ifndef __NR_prlimit64
502 # define __NR_prlimit64 -1
503 #endif
504 #define __NR_sys_prlimit64 __NR_prlimit64
505 /* The glibc rlimit structure may not be that used by the underlying syscall */
506 struct host_rlimit64 {
507     uint64_t rlim_cur;
508     uint64_t rlim_max;
509 };
510 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
511           const struct host_rlimit64 *, new_limit,
512           struct host_rlimit64 *, old_limit)
513 #endif
514 
515 
516 #if defined(TARGET_NR_timer_create)
517 /* Maximum of 32 active POSIX timers allowed at any one time. */
518 static timer_t g_posix_timers[32] = { 0, } ;
519 
520 static inline int next_free_host_timer(void)
521 {
522     int k ;
523     /* FIXME: Does finding the next free slot require a lock? */
524     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
525         if (g_posix_timers[k] == 0) {
526             g_posix_timers[k] = (timer_t) 1;
527             return k;
528         }
529     }
530     return -1;
531 }
532 #endif
533 
534 static inline int host_to_target_errno(int host_errno)
535 {
536     switch (host_errno) {
537 #define E(X)  case X: return TARGET_##X;
538 #include "errnos.c.inc"
539 #undef E
540     default:
541         return host_errno;
542     }
543 }
544 
545 static inline int target_to_host_errno(int target_errno)
546 {
547     switch (target_errno) {
548 #define E(X)  case TARGET_##X: return X;
549 #include "errnos.c.inc"
550 #undef E
551     default:
552         return target_errno;
553     }
554 }
555 
556 abi_long get_errno(abi_long ret)
557 {
558     if (ret == -1)
559         return -host_to_target_errno(errno);
560     else
561         return ret;
562 }
563 
564 const char *target_strerror(int err)
565 {
566     if (err == QEMU_ERESTARTSYS) {
567         return "To be restarted";
568     }
569     if (err == QEMU_ESIGRETURN) {
570         return "Successful exit from sigreturn";
571     }
572 
573     return strerror(target_to_host_errno(err));
574 }
575 
576 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
577 {
578     int i;
579     uint8_t b;
580     if (usize <= ksize) {
581         return 1;
582     }
583     for (i = ksize; i < usize; i++) {
584         if (get_user_u8(b, addr + i)) {
585             return -TARGET_EFAULT;
586         }
587         if (b != 0) {
588             return 0;
589         }
590     }
591     return 1;
592 }
593 
594 #define safe_syscall0(type, name) \
595 static type safe_##name(void) \
596 { \
597     return safe_syscall(__NR_##name); \
598 }
599 
600 #define safe_syscall1(type, name, type1, arg1) \
601 static type safe_##name(type1 arg1) \
602 { \
603     return safe_syscall(__NR_##name, arg1); \
604 }
605 
606 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
607 static type safe_##name(type1 arg1, type2 arg2) \
608 { \
609     return safe_syscall(__NR_##name, arg1, arg2); \
610 }
611 
612 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
613 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
614 { \
615     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
616 }
617 
618 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
619     type4, arg4) \
620 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
621 { \
622     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
623 }
624 
625 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
626     type4, arg4, type5, arg5) \
627 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
628     type5 arg5) \
629 { \
630     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
631 }
632 
633 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
634     type4, arg4, type5, arg5, type6, arg6) \
635 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
636     type5 arg5, type6 arg6) \
637 { \
638     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
639 }
640 
641 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
642 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
643 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
644               int, flags, mode_t, mode)
645 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
646 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
647               struct rusage *, rusage)
648 #endif
649 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
650               int, options, struct rusage *, rusage)
651 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
652 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
653     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
654 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
655               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
656 #endif
657 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
658 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
659               struct timespec *, tsp, const sigset_t *, sigmask,
660               size_t, sigsetsize)
661 #endif
662 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
663               int, maxevents, int, timeout, const sigset_t *, sigmask,
664               size_t, sigsetsize)
665 #if defined(__NR_futex)
666 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
667               const struct timespec *,timeout,int *,uaddr2,int,val3)
668 #endif
669 #if defined(__NR_futex_time64)
670 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
671               const struct timespec *,timeout,int *,uaddr2,int,val3)
672 #endif
673 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
674 safe_syscall2(int, kill, pid_t, pid, int, sig)
675 safe_syscall2(int, tkill, int, tid, int, sig)
676 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
677 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
678 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
679 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
680               unsigned long, pos_l, unsigned long, pos_h)
681 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
682               unsigned long, pos_l, unsigned long, pos_h)
683 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
684               socklen_t, addrlen)
685 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
686               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
687 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
688               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
689 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
690 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
691 safe_syscall2(int, flock, int, fd, int, operation)
692 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
693 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
694               const struct timespec *, uts, size_t, sigsetsize)
695 #endif
696 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
697               int, flags)
698 #if defined(TARGET_NR_nanosleep)
699 safe_syscall2(int, nanosleep, const struct timespec *, req,
700               struct timespec *, rem)
701 #endif
702 #if defined(TARGET_NR_clock_nanosleep) || \
703     defined(TARGET_NR_clock_nanosleep_time64)
704 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
705               const struct timespec *, req, struct timespec *, rem)
706 #endif
707 #ifdef __NR_ipc
708 #ifdef __s390x__
709 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
710               void *, ptr)
711 #else
712 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
713               void *, ptr, long, fifth)
714 #endif
715 #endif
716 #ifdef __NR_msgsnd
717 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
718               int, flags)
719 #endif
720 #ifdef __NR_msgrcv
721 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
722               long, msgtype, int, flags)
723 #endif
724 #ifdef __NR_semtimedop
725 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
726               unsigned, nsops, const struct timespec *, timeout)
727 #endif
728 #if defined(TARGET_NR_mq_timedsend) || \
729     defined(TARGET_NR_mq_timedsend_time64)
730 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
731               size_t, len, unsigned, prio, const struct timespec *, timeout)
732 #endif
733 #if defined(TARGET_NR_mq_timedreceive) || \
734     defined(TARGET_NR_mq_timedreceive_time64)
735 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
736               size_t, len, unsigned *, prio, const struct timespec *, timeout)
737 #endif
738 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
739 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
740               int, outfd, loff_t *, poutoff, size_t, length,
741               unsigned int, flags)
742 #endif
743 
744 /* We do ioctl like this rather than via safe_syscall3 to preserve the
745  * "third argument might be integer or pointer or not present" behaviour of
746  * the libc function.
747  */
748 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
749 /* Similarly for fcntl. Note that callers must always:
750  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
751  *  use the flock64 struct rather than unsuffixed flock
752  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
753  */
754 #ifdef __NR_fcntl64
755 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
756 #else
757 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
758 #endif
759 
760 static inline int host_to_target_sock_type(int host_type)
761 {
762     int target_type;
763 
764     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
765     case SOCK_DGRAM:
766         target_type = TARGET_SOCK_DGRAM;
767         break;
768     case SOCK_STREAM:
769         target_type = TARGET_SOCK_STREAM;
770         break;
771     default:
772         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
773         break;
774     }
775 
776 #if defined(SOCK_CLOEXEC)
777     if (host_type & SOCK_CLOEXEC) {
778         target_type |= TARGET_SOCK_CLOEXEC;
779     }
780 #endif
781 
782 #if defined(SOCK_NONBLOCK)
783     if (host_type & SOCK_NONBLOCK) {
784         target_type |= TARGET_SOCK_NONBLOCK;
785     }
786 #endif
787 
788     return target_type;
789 }
790 
791 static abi_ulong target_brk;
792 static abi_ulong target_original_brk;
793 static abi_ulong brk_page;
794 
795 void target_set_brk(abi_ulong new_brk)
796 {
797     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
798     brk_page = HOST_PAGE_ALIGN(target_brk);
799 }
800 
801 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
802 #define DEBUGF_BRK(message, args...)
803 
804 /* do_brk() must return target values and target errnos. */
805 abi_long do_brk(abi_ulong new_brk)
806 {
807     abi_long mapped_addr;
808     abi_ulong new_alloc_size;
809 
810     /* brk pointers are always untagged */
811 
812     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
813 
814     if (!new_brk) {
815         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
816         return target_brk;
817     }
818     if (new_brk < target_original_brk) {
819         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
820                    target_brk);
821         return target_brk;
822     }
823 
824     /* If the new brk is less than the highest page reserved to the
825      * target heap allocation, set it and we're almost done...  */
826     if (new_brk <= brk_page) {
827         /* Heap contents are initialized to zero, as for anonymous
828          * mapped pages.  */
829         if (new_brk > target_brk) {
830             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
831         }
832 	target_brk = new_brk;
833         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
834 	return target_brk;
835     }
836 
837     /* We need to allocate more memory after the brk... Note that
838      * we don't use MAP_FIXED because that will map over the top of
839      * any existing mapping (like the one with the host libc or qemu
840      * itself); instead we treat "mapped but at wrong address" as
841      * a failure and unmap again.
842      */
843     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
844     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
845                                         PROT_READ|PROT_WRITE,
846                                         MAP_ANON|MAP_PRIVATE, 0, 0));
847 
848     if (mapped_addr == brk_page) {
849         /* Heap contents are initialized to zero, as for anonymous
850          * mapped pages.  Technically the new pages are already
851          * initialized to zero since they *are* anonymous mapped
852          * pages, however we have to take care with the contents that
853          * come from the remaining part of the previous page: it may
854          * contains garbage data due to a previous heap usage (grown
855          * then shrunken).  */
856         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
857 
858         target_brk = new_brk;
859         brk_page = HOST_PAGE_ALIGN(target_brk);
860         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
861             target_brk);
862         return target_brk;
863     } else if (mapped_addr != -1) {
864         /* Mapped but at wrong address, meaning there wasn't actually
865          * enough space for this brk.
866          */
867         target_munmap(mapped_addr, new_alloc_size);
868         mapped_addr = -1;
869         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
870     }
871     else {
872         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
873     }
874 
875 #if defined(TARGET_ALPHA)
876     /* We (partially) emulate OSF/1 on Alpha, which requires we
877        return a proper errno, not an unchanged brk value.  */
878     return -TARGET_ENOMEM;
879 #endif
880     /* For everything else, return the previous break. */
881     return target_brk;
882 }
883 
884 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
885     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
886 static inline abi_long copy_from_user_fdset(fd_set *fds,
887                                             abi_ulong target_fds_addr,
888                                             int n)
889 {
890     int i, nw, j, k;
891     abi_ulong b, *target_fds;
892 
893     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
894     if (!(target_fds = lock_user(VERIFY_READ,
895                                  target_fds_addr,
896                                  sizeof(abi_ulong) * nw,
897                                  1)))
898         return -TARGET_EFAULT;
899 
900     FD_ZERO(fds);
901     k = 0;
902     for (i = 0; i < nw; i++) {
903         /* grab the abi_ulong */
904         __get_user(b, &target_fds[i]);
905         for (j = 0; j < TARGET_ABI_BITS; j++) {
906             /* check the bit inside the abi_ulong */
907             if ((b >> j) & 1)
908                 FD_SET(k, fds);
909             k++;
910         }
911     }
912 
913     unlock_user(target_fds, target_fds_addr, 0);
914 
915     return 0;
916 }
917 
918 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
919                                                  abi_ulong target_fds_addr,
920                                                  int n)
921 {
922     if (target_fds_addr) {
923         if (copy_from_user_fdset(fds, target_fds_addr, n))
924             return -TARGET_EFAULT;
925         *fds_ptr = fds;
926     } else {
927         *fds_ptr = NULL;
928     }
929     return 0;
930 }
931 
932 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
933                                           const fd_set *fds,
934                                           int n)
935 {
936     int i, nw, j, k;
937     abi_long v;
938     abi_ulong *target_fds;
939 
940     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
941     if (!(target_fds = lock_user(VERIFY_WRITE,
942                                  target_fds_addr,
943                                  sizeof(abi_ulong) * nw,
944                                  0)))
945         return -TARGET_EFAULT;
946 
947     k = 0;
948     for (i = 0; i < nw; i++) {
949         v = 0;
950         for (j = 0; j < TARGET_ABI_BITS; j++) {
951             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
952             k++;
953         }
954         __put_user(v, &target_fds[i]);
955     }
956 
957     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
958 
959     return 0;
960 }
961 #endif
962 
963 #if defined(__alpha__)
964 #define HOST_HZ 1024
965 #else
966 #define HOST_HZ 100
967 #endif
968 
969 static inline abi_long host_to_target_clock_t(long ticks)
970 {
971 #if HOST_HZ == TARGET_HZ
972     return ticks;
973 #else
974     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
975 #endif
976 }
977 
978 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
979                                              const struct rusage *rusage)
980 {
981     struct target_rusage *target_rusage;
982 
983     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
984         return -TARGET_EFAULT;
985     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
986     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
987     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
988     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
989     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
990     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
991     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
992     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
993     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
994     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
995     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
996     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
997     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
998     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
999     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1000     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1001     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1002     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1003     unlock_user_struct(target_rusage, target_addr, 1);
1004 
1005     return 0;
1006 }
1007 
1008 #ifdef TARGET_NR_setrlimit
1009 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1010 {
1011     abi_ulong target_rlim_swap;
1012     rlim_t result;
1013 
1014     target_rlim_swap = tswapal(target_rlim);
1015     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1016         return RLIM_INFINITY;
1017 
1018     result = target_rlim_swap;
1019     if (target_rlim_swap != (rlim_t)result)
1020         return RLIM_INFINITY;
1021 
1022     return result;
1023 }
1024 #endif
1025 
1026 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1027 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1028 {
1029     abi_ulong target_rlim_swap;
1030     abi_ulong result;
1031 
1032     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1033         target_rlim_swap = TARGET_RLIM_INFINITY;
1034     else
1035         target_rlim_swap = rlim;
1036     result = tswapal(target_rlim_swap);
1037 
1038     return result;
1039 }
1040 #endif
1041 
1042 static inline int target_to_host_resource(int code)
1043 {
1044     switch (code) {
1045     case TARGET_RLIMIT_AS:
1046         return RLIMIT_AS;
1047     case TARGET_RLIMIT_CORE:
1048         return RLIMIT_CORE;
1049     case TARGET_RLIMIT_CPU:
1050         return RLIMIT_CPU;
1051     case TARGET_RLIMIT_DATA:
1052         return RLIMIT_DATA;
1053     case TARGET_RLIMIT_FSIZE:
1054         return RLIMIT_FSIZE;
1055     case TARGET_RLIMIT_LOCKS:
1056         return RLIMIT_LOCKS;
1057     case TARGET_RLIMIT_MEMLOCK:
1058         return RLIMIT_MEMLOCK;
1059     case TARGET_RLIMIT_MSGQUEUE:
1060         return RLIMIT_MSGQUEUE;
1061     case TARGET_RLIMIT_NICE:
1062         return RLIMIT_NICE;
1063     case TARGET_RLIMIT_NOFILE:
1064         return RLIMIT_NOFILE;
1065     case TARGET_RLIMIT_NPROC:
1066         return RLIMIT_NPROC;
1067     case TARGET_RLIMIT_RSS:
1068         return RLIMIT_RSS;
1069     case TARGET_RLIMIT_RTPRIO:
1070         return RLIMIT_RTPRIO;
1071 #ifdef RLIMIT_RTTIME
1072     case TARGET_RLIMIT_RTTIME:
1073         return RLIMIT_RTTIME;
1074 #endif
1075     case TARGET_RLIMIT_SIGPENDING:
1076         return RLIMIT_SIGPENDING;
1077     case TARGET_RLIMIT_STACK:
1078         return RLIMIT_STACK;
1079     default:
1080         return code;
1081     }
1082 }
1083 
1084 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1085                                               abi_ulong target_tv_addr)
1086 {
1087     struct target_timeval *target_tv;
1088 
1089     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1090         return -TARGET_EFAULT;
1091     }
1092 
1093     __get_user(tv->tv_sec, &target_tv->tv_sec);
1094     __get_user(tv->tv_usec, &target_tv->tv_usec);
1095 
1096     unlock_user_struct(target_tv, target_tv_addr, 0);
1097 
1098     return 0;
1099 }
1100 
1101 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1102                                             const struct timeval *tv)
1103 {
1104     struct target_timeval *target_tv;
1105 
1106     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1107         return -TARGET_EFAULT;
1108     }
1109 
1110     __put_user(tv->tv_sec, &target_tv->tv_sec);
1111     __put_user(tv->tv_usec, &target_tv->tv_usec);
1112 
1113     unlock_user_struct(target_tv, target_tv_addr, 1);
1114 
1115     return 0;
1116 }
1117 
1118 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1119 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1120                                                 abi_ulong target_tv_addr)
1121 {
1122     struct target__kernel_sock_timeval *target_tv;
1123 
1124     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1125         return -TARGET_EFAULT;
1126     }
1127 
1128     __get_user(tv->tv_sec, &target_tv->tv_sec);
1129     __get_user(tv->tv_usec, &target_tv->tv_usec);
1130 
1131     unlock_user_struct(target_tv, target_tv_addr, 0);
1132 
1133     return 0;
1134 }
1135 #endif
1136 
1137 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1138                                               const struct timeval *tv)
1139 {
1140     struct target__kernel_sock_timeval *target_tv;
1141 
1142     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1143         return -TARGET_EFAULT;
1144     }
1145 
1146     __put_user(tv->tv_sec, &target_tv->tv_sec);
1147     __put_user(tv->tv_usec, &target_tv->tv_usec);
1148 
1149     unlock_user_struct(target_tv, target_tv_addr, 1);
1150 
1151     return 0;
1152 }
1153 
1154 #if defined(TARGET_NR_futex) || \
1155     defined(TARGET_NR_rt_sigtimedwait) || \
1156     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1157     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1158     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1159     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1160     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1161     defined(TARGET_NR_timer_settime) || \
1162     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1163 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1164                                                abi_ulong target_addr)
1165 {
1166     struct target_timespec *target_ts;
1167 
1168     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1169         return -TARGET_EFAULT;
1170     }
1171     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1172     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1173     unlock_user_struct(target_ts, target_addr, 0);
1174     return 0;
1175 }
1176 #endif
1177 
1178 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1179     defined(TARGET_NR_timer_settime64) || \
1180     defined(TARGET_NR_mq_timedsend_time64) || \
1181     defined(TARGET_NR_mq_timedreceive_time64) || \
1182     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1183     defined(TARGET_NR_clock_nanosleep_time64) || \
1184     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1185     defined(TARGET_NR_utimensat) || \
1186     defined(TARGET_NR_utimensat_time64) || \
1187     defined(TARGET_NR_semtimedop_time64) || \
1188     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1189 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1190                                                  abi_ulong target_addr)
1191 {
1192     struct target__kernel_timespec *target_ts;
1193 
1194     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1195         return -TARGET_EFAULT;
1196     }
1197     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1198     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1199     /* in 32bit mode, this drops the padding */
1200     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1201     unlock_user_struct(target_ts, target_addr, 0);
1202     return 0;
1203 }
1204 #endif
1205 
1206 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1207                                                struct timespec *host_ts)
1208 {
1209     struct target_timespec *target_ts;
1210 
1211     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1212         return -TARGET_EFAULT;
1213     }
1214     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1215     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1216     unlock_user_struct(target_ts, target_addr, 1);
1217     return 0;
1218 }
1219 
1220 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1221                                                  struct timespec *host_ts)
1222 {
1223     struct target__kernel_timespec *target_ts;
1224 
1225     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1226         return -TARGET_EFAULT;
1227     }
1228     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1229     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1230     unlock_user_struct(target_ts, target_addr, 1);
1231     return 0;
1232 }
1233 
1234 #if defined(TARGET_NR_gettimeofday)
1235 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1236                                              struct timezone *tz)
1237 {
1238     struct target_timezone *target_tz;
1239 
1240     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1241         return -TARGET_EFAULT;
1242     }
1243 
1244     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1245     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1246 
1247     unlock_user_struct(target_tz, target_tz_addr, 1);
1248 
1249     return 0;
1250 }
1251 #endif
1252 
1253 #if defined(TARGET_NR_settimeofday)
1254 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1255                                                abi_ulong target_tz_addr)
1256 {
1257     struct target_timezone *target_tz;
1258 
1259     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1260         return -TARGET_EFAULT;
1261     }
1262 
1263     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1264     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1265 
1266     unlock_user_struct(target_tz, target_tz_addr, 0);
1267 
1268     return 0;
1269 }
1270 #endif
1271 
1272 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1273 #include <mqueue.h>
1274 
1275 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1276                                               abi_ulong target_mq_attr_addr)
1277 {
1278     struct target_mq_attr *target_mq_attr;
1279 
1280     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1281                           target_mq_attr_addr, 1))
1282         return -TARGET_EFAULT;
1283 
1284     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1285     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1286     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1287     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1288 
1289     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1290 
1291     return 0;
1292 }
1293 
1294 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1295                                             const struct mq_attr *attr)
1296 {
1297     struct target_mq_attr *target_mq_attr;
1298 
1299     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1300                           target_mq_attr_addr, 0))
1301         return -TARGET_EFAULT;
1302 
1303     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1304     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1305     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1306     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1307 
1308     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1309 
1310     return 0;
1311 }
1312 #endif
1313 
1314 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1315 /* do_select() must return target values and target errnos. */
1316 static abi_long do_select(int n,
1317                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1318                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1319 {
1320     fd_set rfds, wfds, efds;
1321     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1322     struct timeval tv;
1323     struct timespec ts, *ts_ptr;
1324     abi_long ret;
1325 
1326     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1327     if (ret) {
1328         return ret;
1329     }
1330     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1331     if (ret) {
1332         return ret;
1333     }
1334     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1335     if (ret) {
1336         return ret;
1337     }
1338 
1339     if (target_tv_addr) {
1340         if (copy_from_user_timeval(&tv, target_tv_addr))
1341             return -TARGET_EFAULT;
1342         ts.tv_sec = tv.tv_sec;
1343         ts.tv_nsec = tv.tv_usec * 1000;
1344         ts_ptr = &ts;
1345     } else {
1346         ts_ptr = NULL;
1347     }
1348 
1349     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1350                                   ts_ptr, NULL));
1351 
1352     if (!is_error(ret)) {
1353         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1354             return -TARGET_EFAULT;
1355         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1356             return -TARGET_EFAULT;
1357         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1358             return -TARGET_EFAULT;
1359 
1360         if (target_tv_addr) {
1361             tv.tv_sec = ts.tv_sec;
1362             tv.tv_usec = ts.tv_nsec / 1000;
1363             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1364                 return -TARGET_EFAULT;
1365             }
1366         }
1367     }
1368 
1369     return ret;
1370 }
1371 
1372 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1373 static abi_long do_old_select(abi_ulong arg1)
1374 {
1375     struct target_sel_arg_struct *sel;
1376     abi_ulong inp, outp, exp, tvp;
1377     long nsel;
1378 
1379     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1380         return -TARGET_EFAULT;
1381     }
1382 
1383     nsel = tswapal(sel->n);
1384     inp = tswapal(sel->inp);
1385     outp = tswapal(sel->outp);
1386     exp = tswapal(sel->exp);
1387     tvp = tswapal(sel->tvp);
1388 
1389     unlock_user_struct(sel, arg1, 0);
1390 
1391     return do_select(nsel, inp, outp, exp, tvp);
1392 }
1393 #endif
1394 #endif
1395 
1396 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1397 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1398                             abi_long arg4, abi_long arg5, abi_long arg6,
1399                             bool time64)
1400 {
1401     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1402     fd_set rfds, wfds, efds;
1403     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1404     struct timespec ts, *ts_ptr;
1405     abi_long ret;
1406 
1407     /*
1408      * The 6th arg is actually two args smashed together,
1409      * so we cannot use the C library.
1410      */
1411     struct {
1412         sigset_t *set;
1413         size_t size;
1414     } sig, *sig_ptr;
1415 
1416     abi_ulong arg_sigset, arg_sigsize, *arg7;
1417 
1418     n = arg1;
1419     rfd_addr = arg2;
1420     wfd_addr = arg3;
1421     efd_addr = arg4;
1422     ts_addr = arg5;
1423 
1424     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1425     if (ret) {
1426         return ret;
1427     }
1428     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1429     if (ret) {
1430         return ret;
1431     }
1432     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1433     if (ret) {
1434         return ret;
1435     }
1436 
1437     /*
1438      * This takes a timespec, and not a timeval, so we cannot
1439      * use the do_select() helper ...
1440      */
1441     if (ts_addr) {
1442         if (time64) {
1443             if (target_to_host_timespec64(&ts, ts_addr)) {
1444                 return -TARGET_EFAULT;
1445             }
1446         } else {
1447             if (target_to_host_timespec(&ts, ts_addr)) {
1448                 return -TARGET_EFAULT;
1449             }
1450         }
1451             ts_ptr = &ts;
1452     } else {
1453         ts_ptr = NULL;
1454     }
1455 
1456     /* Extract the two packed args for the sigset */
1457     sig_ptr = NULL;
1458     if (arg6) {
1459         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1460         if (!arg7) {
1461             return -TARGET_EFAULT;
1462         }
1463         arg_sigset = tswapal(arg7[0]);
1464         arg_sigsize = tswapal(arg7[1]);
1465         unlock_user(arg7, arg6, 0);
1466 
1467         if (arg_sigset) {
1468             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1469             if (ret != 0) {
1470                 return ret;
1471             }
1472             sig_ptr = &sig;
1473             sig.size = SIGSET_T_SIZE;
1474         }
1475     }
1476 
1477     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1478                                   ts_ptr, sig_ptr));
1479 
1480     if (sig_ptr) {
1481         finish_sigsuspend_mask(ret);
1482     }
1483 
1484     if (!is_error(ret)) {
1485         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1486             return -TARGET_EFAULT;
1487         }
1488         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1489             return -TARGET_EFAULT;
1490         }
1491         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1492             return -TARGET_EFAULT;
1493         }
1494         if (time64) {
1495             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1496                 return -TARGET_EFAULT;
1497             }
1498         } else {
1499             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1500                 return -TARGET_EFAULT;
1501             }
1502         }
1503     }
1504     return ret;
1505 }
1506 #endif
1507 
1508 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1509     defined(TARGET_NR_ppoll_time64)
1510 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1511                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1512 {
1513     struct target_pollfd *target_pfd;
1514     unsigned int nfds = arg2;
1515     struct pollfd *pfd;
1516     unsigned int i;
1517     abi_long ret;
1518 
1519     pfd = NULL;
1520     target_pfd = NULL;
1521     if (nfds) {
1522         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1523             return -TARGET_EINVAL;
1524         }
1525         target_pfd = lock_user(VERIFY_WRITE, arg1,
1526                                sizeof(struct target_pollfd) * nfds, 1);
1527         if (!target_pfd) {
1528             return -TARGET_EFAULT;
1529         }
1530 
1531         pfd = alloca(sizeof(struct pollfd) * nfds);
1532         for (i = 0; i < nfds; i++) {
1533             pfd[i].fd = tswap32(target_pfd[i].fd);
1534             pfd[i].events = tswap16(target_pfd[i].events);
1535         }
1536     }
1537     if (ppoll) {
1538         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1539         sigset_t *set = NULL;
1540 
1541         if (arg3) {
1542             if (time64) {
1543                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1544                     unlock_user(target_pfd, arg1, 0);
1545                     return -TARGET_EFAULT;
1546                 }
1547             } else {
1548                 if (target_to_host_timespec(timeout_ts, arg3)) {
1549                     unlock_user(target_pfd, arg1, 0);
1550                     return -TARGET_EFAULT;
1551                 }
1552             }
1553         } else {
1554             timeout_ts = NULL;
1555         }
1556 
1557         if (arg4) {
1558             ret = process_sigsuspend_mask(&set, arg4, arg5);
1559             if (ret != 0) {
1560                 unlock_user(target_pfd, arg1, 0);
1561                 return ret;
1562             }
1563         }
1564 
1565         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1566                                    set, SIGSET_T_SIZE));
1567 
1568         if (set) {
1569             finish_sigsuspend_mask(ret);
1570         }
1571         if (!is_error(ret) && arg3) {
1572             if (time64) {
1573                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1574                     return -TARGET_EFAULT;
1575                 }
1576             } else {
1577                 if (host_to_target_timespec(arg3, timeout_ts)) {
1578                     return -TARGET_EFAULT;
1579                 }
1580             }
1581         }
1582     } else {
1583           struct timespec ts, *pts;
1584 
1585           if (arg3 >= 0) {
1586               /* Convert ms to secs, ns */
1587               ts.tv_sec = arg3 / 1000;
1588               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1589               pts = &ts;
1590           } else {
1591               /* -ve poll() timeout means "infinite" */
1592               pts = NULL;
1593           }
1594           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1595     }
1596 
1597     if (!is_error(ret)) {
1598         for (i = 0; i < nfds; i++) {
1599             target_pfd[i].revents = tswap16(pfd[i].revents);
1600         }
1601     }
1602     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1603     return ret;
1604 }
1605 #endif
1606 
1607 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1608                         int flags, int is_pipe2)
1609 {
1610     int host_pipe[2];
1611     abi_long ret;
1612     ret = pipe2(host_pipe, flags);
1613 
1614     if (is_error(ret))
1615         return get_errno(ret);
1616 
1617     /* Several targets have special calling conventions for the original
1618        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1619     if (!is_pipe2) {
1620 #if defined(TARGET_ALPHA)
1621         cpu_env->ir[IR_A4] = host_pipe[1];
1622         return host_pipe[0];
1623 #elif defined(TARGET_MIPS)
1624         cpu_env->active_tc.gpr[3] = host_pipe[1];
1625         return host_pipe[0];
1626 #elif defined(TARGET_SH4)
1627         cpu_env->gregs[1] = host_pipe[1];
1628         return host_pipe[0];
1629 #elif defined(TARGET_SPARC)
1630         cpu_env->regwptr[1] = host_pipe[1];
1631         return host_pipe[0];
1632 #endif
1633     }
1634 
1635     if (put_user_s32(host_pipe[0], pipedes)
1636         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1637         return -TARGET_EFAULT;
1638     return get_errno(ret);
1639 }
1640 
1641 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1642                                               abi_ulong target_addr,
1643                                               socklen_t len)
1644 {
1645     struct target_ip_mreqn *target_smreqn;
1646 
1647     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1648     if (!target_smreqn)
1649         return -TARGET_EFAULT;
1650     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1651     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1652     if (len == sizeof(struct target_ip_mreqn))
1653         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1654     unlock_user(target_smreqn, target_addr, 0);
1655 
1656     return 0;
1657 }
1658 
1659 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1660                                                abi_ulong target_addr,
1661                                                socklen_t len)
1662 {
1663     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1664     sa_family_t sa_family;
1665     struct target_sockaddr *target_saddr;
1666 
1667     if (fd_trans_target_to_host_addr(fd)) {
1668         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1669     }
1670 
1671     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1672     if (!target_saddr)
1673         return -TARGET_EFAULT;
1674 
1675     sa_family = tswap16(target_saddr->sa_family);
1676 
1677     /* Oops. The caller might send a incomplete sun_path; sun_path
1678      * must be terminated by \0 (see the manual page), but
1679      * unfortunately it is quite common to specify sockaddr_un
1680      * length as "strlen(x->sun_path)" while it should be
1681      * "strlen(...) + 1". We'll fix that here if needed.
1682      * Linux kernel has a similar feature.
1683      */
1684 
1685     if (sa_family == AF_UNIX) {
1686         if (len < unix_maxlen && len > 0) {
1687             char *cp = (char*)target_saddr;
1688 
1689             if ( cp[len-1] && !cp[len] )
1690                 len++;
1691         }
1692         if (len > unix_maxlen)
1693             len = unix_maxlen;
1694     }
1695 
1696     memcpy(addr, target_saddr, len);
1697     addr->sa_family = sa_family;
1698     if (sa_family == AF_NETLINK) {
1699         struct sockaddr_nl *nladdr;
1700 
1701         nladdr = (struct sockaddr_nl *)addr;
1702         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1703         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1704     } else if (sa_family == AF_PACKET) {
1705 	struct target_sockaddr_ll *lladdr;
1706 
1707 	lladdr = (struct target_sockaddr_ll *)addr;
1708 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1709 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1710     }
1711     unlock_user(target_saddr, target_addr, 0);
1712 
1713     return 0;
1714 }
1715 
1716 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1717                                                struct sockaddr *addr,
1718                                                socklen_t len)
1719 {
1720     struct target_sockaddr *target_saddr;
1721 
1722     if (len == 0) {
1723         return 0;
1724     }
1725     assert(addr);
1726 
1727     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1728     if (!target_saddr)
1729         return -TARGET_EFAULT;
1730     memcpy(target_saddr, addr, len);
1731     if (len >= offsetof(struct target_sockaddr, sa_family) +
1732         sizeof(target_saddr->sa_family)) {
1733         target_saddr->sa_family = tswap16(addr->sa_family);
1734     }
1735     if (addr->sa_family == AF_NETLINK &&
1736         len >= sizeof(struct target_sockaddr_nl)) {
1737         struct target_sockaddr_nl *target_nl =
1738                (struct target_sockaddr_nl *)target_saddr;
1739         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1740         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1741     } else if (addr->sa_family == AF_PACKET) {
1742         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1743         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1744         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1745     } else if (addr->sa_family == AF_INET6 &&
1746                len >= sizeof(struct target_sockaddr_in6)) {
1747         struct target_sockaddr_in6 *target_in6 =
1748                (struct target_sockaddr_in6 *)target_saddr;
1749         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1750     }
1751     unlock_user(target_saddr, target_addr, len);
1752 
1753     return 0;
1754 }
1755 
1756 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1757                                            struct target_msghdr *target_msgh)
1758 {
1759     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1760     abi_long msg_controllen;
1761     abi_ulong target_cmsg_addr;
1762     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1763     socklen_t space = 0;
1764 
1765     msg_controllen = tswapal(target_msgh->msg_controllen);
1766     if (msg_controllen < sizeof (struct target_cmsghdr))
1767         goto the_end;
1768     target_cmsg_addr = tswapal(target_msgh->msg_control);
1769     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1770     target_cmsg_start = target_cmsg;
1771     if (!target_cmsg)
1772         return -TARGET_EFAULT;
1773 
1774     while (cmsg && target_cmsg) {
1775         void *data = CMSG_DATA(cmsg);
1776         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1777 
1778         int len = tswapal(target_cmsg->cmsg_len)
1779             - sizeof(struct target_cmsghdr);
1780 
1781         space += CMSG_SPACE(len);
1782         if (space > msgh->msg_controllen) {
1783             space -= CMSG_SPACE(len);
1784             /* This is a QEMU bug, since we allocated the payload
1785              * area ourselves (unlike overflow in host-to-target
1786              * conversion, which is just the guest giving us a buffer
1787              * that's too small). It can't happen for the payload types
1788              * we currently support; if it becomes an issue in future
1789              * we would need to improve our allocation strategy to
1790              * something more intelligent than "twice the size of the
1791              * target buffer we're reading from".
1792              */
1793             qemu_log_mask(LOG_UNIMP,
1794                           ("Unsupported ancillary data %d/%d: "
1795                            "unhandled msg size\n"),
1796                           tswap32(target_cmsg->cmsg_level),
1797                           tswap32(target_cmsg->cmsg_type));
1798             break;
1799         }
1800 
1801         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1802             cmsg->cmsg_level = SOL_SOCKET;
1803         } else {
1804             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1805         }
1806         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1807         cmsg->cmsg_len = CMSG_LEN(len);
1808 
1809         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1810             int *fd = (int *)data;
1811             int *target_fd = (int *)target_data;
1812             int i, numfds = len / sizeof(int);
1813 
1814             for (i = 0; i < numfds; i++) {
1815                 __get_user(fd[i], target_fd + i);
1816             }
1817         } else if (cmsg->cmsg_level == SOL_SOCKET
1818                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1819             struct ucred *cred = (struct ucred *)data;
1820             struct target_ucred *target_cred =
1821                 (struct target_ucred *)target_data;
1822 
1823             __get_user(cred->pid, &target_cred->pid);
1824             __get_user(cred->uid, &target_cred->uid);
1825             __get_user(cred->gid, &target_cred->gid);
1826         } else {
1827             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1828                           cmsg->cmsg_level, cmsg->cmsg_type);
1829             memcpy(data, target_data, len);
1830         }
1831 
1832         cmsg = CMSG_NXTHDR(msgh, cmsg);
1833         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1834                                          target_cmsg_start);
1835     }
1836     unlock_user(target_cmsg, target_cmsg_addr, 0);
1837  the_end:
1838     msgh->msg_controllen = space;
1839     return 0;
1840 }
1841 
1842 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1843                                            struct msghdr *msgh)
1844 {
1845     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1846     abi_long msg_controllen;
1847     abi_ulong target_cmsg_addr;
1848     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1849     socklen_t space = 0;
1850 
1851     msg_controllen = tswapal(target_msgh->msg_controllen);
1852     if (msg_controllen < sizeof (struct target_cmsghdr))
1853         goto the_end;
1854     target_cmsg_addr = tswapal(target_msgh->msg_control);
1855     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1856     target_cmsg_start = target_cmsg;
1857     if (!target_cmsg)
1858         return -TARGET_EFAULT;
1859 
1860     while (cmsg && target_cmsg) {
1861         void *data = CMSG_DATA(cmsg);
1862         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1863 
1864         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1865         int tgt_len, tgt_space;
1866 
1867         /* We never copy a half-header but may copy half-data;
1868          * this is Linux's behaviour in put_cmsg(). Note that
1869          * truncation here is a guest problem (which we report
1870          * to the guest via the CTRUNC bit), unlike truncation
1871          * in target_to_host_cmsg, which is a QEMU bug.
1872          */
1873         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1874             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1875             break;
1876         }
1877 
1878         if (cmsg->cmsg_level == SOL_SOCKET) {
1879             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1880         } else {
1881             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1882         }
1883         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1884 
1885         /* Payload types which need a different size of payload on
1886          * the target must adjust tgt_len here.
1887          */
1888         tgt_len = len;
1889         switch (cmsg->cmsg_level) {
1890         case SOL_SOCKET:
1891             switch (cmsg->cmsg_type) {
1892             case SO_TIMESTAMP:
1893                 tgt_len = sizeof(struct target_timeval);
1894                 break;
1895             default:
1896                 break;
1897             }
1898             break;
1899         default:
1900             break;
1901         }
1902 
1903         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1904             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1905             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1906         }
1907 
1908         /* We must now copy-and-convert len bytes of payload
1909          * into tgt_len bytes of destination space. Bear in mind
1910          * that in both source and destination we may be dealing
1911          * with a truncated value!
1912          */
1913         switch (cmsg->cmsg_level) {
1914         case SOL_SOCKET:
1915             switch (cmsg->cmsg_type) {
1916             case SCM_RIGHTS:
1917             {
1918                 int *fd = (int *)data;
1919                 int *target_fd = (int *)target_data;
1920                 int i, numfds = tgt_len / sizeof(int);
1921 
1922                 for (i = 0; i < numfds; i++) {
1923                     __put_user(fd[i], target_fd + i);
1924                 }
1925                 break;
1926             }
1927             case SO_TIMESTAMP:
1928             {
1929                 struct timeval *tv = (struct timeval *)data;
1930                 struct target_timeval *target_tv =
1931                     (struct target_timeval *)target_data;
1932 
1933                 if (len != sizeof(struct timeval) ||
1934                     tgt_len != sizeof(struct target_timeval)) {
1935                     goto unimplemented;
1936                 }
1937 
1938                 /* copy struct timeval to target */
1939                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1940                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1941                 break;
1942             }
1943             case SCM_CREDENTIALS:
1944             {
1945                 struct ucred *cred = (struct ucred *)data;
1946                 struct target_ucred *target_cred =
1947                     (struct target_ucred *)target_data;
1948 
1949                 __put_user(cred->pid, &target_cred->pid);
1950                 __put_user(cred->uid, &target_cred->uid);
1951                 __put_user(cred->gid, &target_cred->gid);
1952                 break;
1953             }
1954             default:
1955                 goto unimplemented;
1956             }
1957             break;
1958 
1959         case SOL_IP:
1960             switch (cmsg->cmsg_type) {
1961             case IP_TTL:
1962             {
1963                 uint32_t *v = (uint32_t *)data;
1964                 uint32_t *t_int = (uint32_t *)target_data;
1965 
1966                 if (len != sizeof(uint32_t) ||
1967                     tgt_len != sizeof(uint32_t)) {
1968                     goto unimplemented;
1969                 }
1970                 __put_user(*v, t_int);
1971                 break;
1972             }
1973             case IP_RECVERR:
1974             {
1975                 struct errhdr_t {
1976                    struct sock_extended_err ee;
1977                    struct sockaddr_in offender;
1978                 };
1979                 struct errhdr_t *errh = (struct errhdr_t *)data;
1980                 struct errhdr_t *target_errh =
1981                     (struct errhdr_t *)target_data;
1982 
1983                 if (len != sizeof(struct errhdr_t) ||
1984                     tgt_len != sizeof(struct errhdr_t)) {
1985                     goto unimplemented;
1986                 }
1987                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1988                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1989                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1990                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1991                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1992                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1993                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1994                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1995                     (void *) &errh->offender, sizeof(errh->offender));
1996                 break;
1997             }
1998             default:
1999                 goto unimplemented;
2000             }
2001             break;
2002 
2003         case SOL_IPV6:
2004             switch (cmsg->cmsg_type) {
2005             case IPV6_HOPLIMIT:
2006             {
2007                 uint32_t *v = (uint32_t *)data;
2008                 uint32_t *t_int = (uint32_t *)target_data;
2009 
2010                 if (len != sizeof(uint32_t) ||
2011                     tgt_len != sizeof(uint32_t)) {
2012                     goto unimplemented;
2013                 }
2014                 __put_user(*v, t_int);
2015                 break;
2016             }
2017             case IPV6_RECVERR:
2018             {
2019                 struct errhdr6_t {
2020                    struct sock_extended_err ee;
2021                    struct sockaddr_in6 offender;
2022                 };
2023                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2024                 struct errhdr6_t *target_errh =
2025                     (struct errhdr6_t *)target_data;
2026 
2027                 if (len != sizeof(struct errhdr6_t) ||
2028                     tgt_len != sizeof(struct errhdr6_t)) {
2029                     goto unimplemented;
2030                 }
2031                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2032                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2033                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2034                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2035                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2036                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2037                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2038                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2039                     (void *) &errh->offender, sizeof(errh->offender));
2040                 break;
2041             }
2042             default:
2043                 goto unimplemented;
2044             }
2045             break;
2046 
2047         default:
2048         unimplemented:
2049             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2050                           cmsg->cmsg_level, cmsg->cmsg_type);
2051             memcpy(target_data, data, MIN(len, tgt_len));
2052             if (tgt_len > len) {
2053                 memset(target_data + len, 0, tgt_len - len);
2054             }
2055         }
2056 
2057         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2058         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2059         if (msg_controllen < tgt_space) {
2060             tgt_space = msg_controllen;
2061         }
2062         msg_controllen -= tgt_space;
2063         space += tgt_space;
2064         cmsg = CMSG_NXTHDR(msgh, cmsg);
2065         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2066                                          target_cmsg_start);
2067     }
2068     unlock_user(target_cmsg, target_cmsg_addr, space);
2069  the_end:
2070     target_msgh->msg_controllen = tswapal(space);
2071     return 0;
2072 }
2073 
2074 /* do_setsockopt() Must return target values and target errnos. */
2075 static abi_long do_setsockopt(int sockfd, int level, int optname,
2076                               abi_ulong optval_addr, socklen_t optlen)
2077 {
2078     abi_long ret;
2079     int val;
2080     struct ip_mreqn *ip_mreq;
2081     struct ip_mreq_source *ip_mreq_source;
2082 
2083     switch(level) {
2084     case SOL_TCP:
2085     case SOL_UDP:
2086         /* TCP and UDP options all take an 'int' value.  */
2087         if (optlen < sizeof(uint32_t))
2088             return -TARGET_EINVAL;
2089 
2090         if (get_user_u32(val, optval_addr))
2091             return -TARGET_EFAULT;
2092         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2093         break;
2094     case SOL_IP:
2095         switch(optname) {
2096         case IP_TOS:
2097         case IP_TTL:
2098         case IP_HDRINCL:
2099         case IP_ROUTER_ALERT:
2100         case IP_RECVOPTS:
2101         case IP_RETOPTS:
2102         case IP_PKTINFO:
2103         case IP_MTU_DISCOVER:
2104         case IP_RECVERR:
2105         case IP_RECVTTL:
2106         case IP_RECVTOS:
2107 #ifdef IP_FREEBIND
2108         case IP_FREEBIND:
2109 #endif
2110         case IP_MULTICAST_TTL:
2111         case IP_MULTICAST_LOOP:
2112             val = 0;
2113             if (optlen >= sizeof(uint32_t)) {
2114                 if (get_user_u32(val, optval_addr))
2115                     return -TARGET_EFAULT;
2116             } else if (optlen >= 1) {
2117                 if (get_user_u8(val, optval_addr))
2118                     return -TARGET_EFAULT;
2119             }
2120             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2121             break;
2122         case IP_ADD_MEMBERSHIP:
2123         case IP_DROP_MEMBERSHIP:
2124             if (optlen < sizeof (struct target_ip_mreq) ||
2125                 optlen > sizeof (struct target_ip_mreqn))
2126                 return -TARGET_EINVAL;
2127 
2128             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2129             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2130             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2131             break;
2132 
2133         case IP_BLOCK_SOURCE:
2134         case IP_UNBLOCK_SOURCE:
2135         case IP_ADD_SOURCE_MEMBERSHIP:
2136         case IP_DROP_SOURCE_MEMBERSHIP:
2137             if (optlen != sizeof (struct target_ip_mreq_source))
2138                 return -TARGET_EINVAL;
2139 
2140             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2141             if (!ip_mreq_source) {
2142                 return -TARGET_EFAULT;
2143             }
2144             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2145             unlock_user (ip_mreq_source, optval_addr, 0);
2146             break;
2147 
2148         default:
2149             goto unimplemented;
2150         }
2151         break;
2152     case SOL_IPV6:
2153         switch (optname) {
2154         case IPV6_MTU_DISCOVER:
2155         case IPV6_MTU:
2156         case IPV6_V6ONLY:
2157         case IPV6_RECVPKTINFO:
2158         case IPV6_UNICAST_HOPS:
2159         case IPV6_MULTICAST_HOPS:
2160         case IPV6_MULTICAST_LOOP:
2161         case IPV6_RECVERR:
2162         case IPV6_RECVHOPLIMIT:
2163         case IPV6_2292HOPLIMIT:
2164         case IPV6_CHECKSUM:
2165         case IPV6_ADDRFORM:
2166         case IPV6_2292PKTINFO:
2167         case IPV6_RECVTCLASS:
2168         case IPV6_RECVRTHDR:
2169         case IPV6_2292RTHDR:
2170         case IPV6_RECVHOPOPTS:
2171         case IPV6_2292HOPOPTS:
2172         case IPV6_RECVDSTOPTS:
2173         case IPV6_2292DSTOPTS:
2174         case IPV6_TCLASS:
2175         case IPV6_ADDR_PREFERENCES:
2176 #ifdef IPV6_RECVPATHMTU
2177         case IPV6_RECVPATHMTU:
2178 #endif
2179 #ifdef IPV6_TRANSPARENT
2180         case IPV6_TRANSPARENT:
2181 #endif
2182 #ifdef IPV6_FREEBIND
2183         case IPV6_FREEBIND:
2184 #endif
2185 #ifdef IPV6_RECVORIGDSTADDR
2186         case IPV6_RECVORIGDSTADDR:
2187 #endif
2188             val = 0;
2189             if (optlen < sizeof(uint32_t)) {
2190                 return -TARGET_EINVAL;
2191             }
2192             if (get_user_u32(val, optval_addr)) {
2193                 return -TARGET_EFAULT;
2194             }
2195             ret = get_errno(setsockopt(sockfd, level, optname,
2196                                        &val, sizeof(val)));
2197             break;
2198         case IPV6_PKTINFO:
2199         {
2200             struct in6_pktinfo pki;
2201 
2202             if (optlen < sizeof(pki)) {
2203                 return -TARGET_EINVAL;
2204             }
2205 
2206             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2207                 return -TARGET_EFAULT;
2208             }
2209 
2210             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2211 
2212             ret = get_errno(setsockopt(sockfd, level, optname,
2213                                        &pki, sizeof(pki)));
2214             break;
2215         }
2216         case IPV6_ADD_MEMBERSHIP:
2217         case IPV6_DROP_MEMBERSHIP:
2218         {
2219             struct ipv6_mreq ipv6mreq;
2220 
2221             if (optlen < sizeof(ipv6mreq)) {
2222                 return -TARGET_EINVAL;
2223             }
2224 
2225             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2226                 return -TARGET_EFAULT;
2227             }
2228 
2229             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2230 
2231             ret = get_errno(setsockopt(sockfd, level, optname,
2232                                        &ipv6mreq, sizeof(ipv6mreq)));
2233             break;
2234         }
2235         default:
2236             goto unimplemented;
2237         }
2238         break;
2239     case SOL_ICMPV6:
2240         switch (optname) {
2241         case ICMPV6_FILTER:
2242         {
2243             struct icmp6_filter icmp6f;
2244 
2245             if (optlen > sizeof(icmp6f)) {
2246                 optlen = sizeof(icmp6f);
2247             }
2248 
2249             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2250                 return -TARGET_EFAULT;
2251             }
2252 
2253             for (val = 0; val < 8; val++) {
2254                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2255             }
2256 
2257             ret = get_errno(setsockopt(sockfd, level, optname,
2258                                        &icmp6f, optlen));
2259             break;
2260         }
2261         default:
2262             goto unimplemented;
2263         }
2264         break;
2265     case SOL_RAW:
2266         switch (optname) {
2267         case ICMP_FILTER:
2268         case IPV6_CHECKSUM:
2269             /* those take an u32 value */
2270             if (optlen < sizeof(uint32_t)) {
2271                 return -TARGET_EINVAL;
2272             }
2273 
2274             if (get_user_u32(val, optval_addr)) {
2275                 return -TARGET_EFAULT;
2276             }
2277             ret = get_errno(setsockopt(sockfd, level, optname,
2278                                        &val, sizeof(val)));
2279             break;
2280 
2281         default:
2282             goto unimplemented;
2283         }
2284         break;
2285 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2286     case SOL_ALG:
2287         switch (optname) {
2288         case ALG_SET_KEY:
2289         {
2290             char *alg_key = g_malloc(optlen);
2291 
2292             if (!alg_key) {
2293                 return -TARGET_ENOMEM;
2294             }
2295             if (copy_from_user(alg_key, optval_addr, optlen)) {
2296                 g_free(alg_key);
2297                 return -TARGET_EFAULT;
2298             }
2299             ret = get_errno(setsockopt(sockfd, level, optname,
2300                                        alg_key, optlen));
2301             g_free(alg_key);
2302             break;
2303         }
2304         case ALG_SET_AEAD_AUTHSIZE:
2305         {
2306             ret = get_errno(setsockopt(sockfd, level, optname,
2307                                        NULL, optlen));
2308             break;
2309         }
2310         default:
2311             goto unimplemented;
2312         }
2313         break;
2314 #endif
2315     case TARGET_SOL_SOCKET:
2316         switch (optname) {
2317         case TARGET_SO_RCVTIMEO:
2318         {
2319                 struct timeval tv;
2320 
2321                 optname = SO_RCVTIMEO;
2322 
2323 set_timeout:
2324                 if (optlen != sizeof(struct target_timeval)) {
2325                     return -TARGET_EINVAL;
2326                 }
2327 
2328                 if (copy_from_user_timeval(&tv, optval_addr)) {
2329                     return -TARGET_EFAULT;
2330                 }
2331 
2332                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2333                                 &tv, sizeof(tv)));
2334                 return ret;
2335         }
2336         case TARGET_SO_SNDTIMEO:
2337                 optname = SO_SNDTIMEO;
2338                 goto set_timeout;
2339         case TARGET_SO_ATTACH_FILTER:
2340         {
2341                 struct target_sock_fprog *tfprog;
2342                 struct target_sock_filter *tfilter;
2343                 struct sock_fprog fprog;
2344                 struct sock_filter *filter;
2345                 int i;
2346 
2347                 if (optlen != sizeof(*tfprog)) {
2348                     return -TARGET_EINVAL;
2349                 }
2350                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2351                     return -TARGET_EFAULT;
2352                 }
2353                 if (!lock_user_struct(VERIFY_READ, tfilter,
2354                                       tswapal(tfprog->filter), 0)) {
2355                     unlock_user_struct(tfprog, optval_addr, 1);
2356                     return -TARGET_EFAULT;
2357                 }
2358 
2359                 fprog.len = tswap16(tfprog->len);
2360                 filter = g_try_new(struct sock_filter, fprog.len);
2361                 if (filter == NULL) {
2362                     unlock_user_struct(tfilter, tfprog->filter, 1);
2363                     unlock_user_struct(tfprog, optval_addr, 1);
2364                     return -TARGET_ENOMEM;
2365                 }
2366                 for (i = 0; i < fprog.len; i++) {
2367                     filter[i].code = tswap16(tfilter[i].code);
2368                     filter[i].jt = tfilter[i].jt;
2369                     filter[i].jf = tfilter[i].jf;
2370                     filter[i].k = tswap32(tfilter[i].k);
2371                 }
2372                 fprog.filter = filter;
2373 
2374                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2375                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2376                 g_free(filter);
2377 
2378                 unlock_user_struct(tfilter, tfprog->filter, 1);
2379                 unlock_user_struct(tfprog, optval_addr, 1);
2380                 return ret;
2381         }
2382 	case TARGET_SO_BINDTODEVICE:
2383 	{
2384 		char *dev_ifname, *addr_ifname;
2385 
2386 		if (optlen > IFNAMSIZ - 1) {
2387 		    optlen = IFNAMSIZ - 1;
2388 		}
2389 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2390 		if (!dev_ifname) {
2391 		    return -TARGET_EFAULT;
2392 		}
2393 		optname = SO_BINDTODEVICE;
2394 		addr_ifname = alloca(IFNAMSIZ);
2395 		memcpy(addr_ifname, dev_ifname, optlen);
2396 		addr_ifname[optlen] = 0;
2397 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2398                                            addr_ifname, optlen));
2399 		unlock_user (dev_ifname, optval_addr, 0);
2400 		return ret;
2401 	}
2402         case TARGET_SO_LINGER:
2403         {
2404                 struct linger lg;
2405                 struct target_linger *tlg;
2406 
2407                 if (optlen != sizeof(struct target_linger)) {
2408                     return -TARGET_EINVAL;
2409                 }
2410                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2411                     return -TARGET_EFAULT;
2412                 }
2413                 __get_user(lg.l_onoff, &tlg->l_onoff);
2414                 __get_user(lg.l_linger, &tlg->l_linger);
2415                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2416                                 &lg, sizeof(lg)));
2417                 unlock_user_struct(tlg, optval_addr, 0);
2418                 return ret;
2419         }
2420             /* Options with 'int' argument.  */
2421         case TARGET_SO_DEBUG:
2422 		optname = SO_DEBUG;
2423 		break;
2424         case TARGET_SO_REUSEADDR:
2425 		optname = SO_REUSEADDR;
2426 		break;
2427 #ifdef SO_REUSEPORT
2428         case TARGET_SO_REUSEPORT:
2429                 optname = SO_REUSEPORT;
2430                 break;
2431 #endif
2432         case TARGET_SO_TYPE:
2433 		optname = SO_TYPE;
2434 		break;
2435         case TARGET_SO_ERROR:
2436 		optname = SO_ERROR;
2437 		break;
2438         case TARGET_SO_DONTROUTE:
2439 		optname = SO_DONTROUTE;
2440 		break;
2441         case TARGET_SO_BROADCAST:
2442 		optname = SO_BROADCAST;
2443 		break;
2444         case TARGET_SO_SNDBUF:
2445 		optname = SO_SNDBUF;
2446 		break;
2447         case TARGET_SO_SNDBUFFORCE:
2448                 optname = SO_SNDBUFFORCE;
2449                 break;
2450         case TARGET_SO_RCVBUF:
2451 		optname = SO_RCVBUF;
2452 		break;
2453         case TARGET_SO_RCVBUFFORCE:
2454                 optname = SO_RCVBUFFORCE;
2455                 break;
2456         case TARGET_SO_KEEPALIVE:
2457 		optname = SO_KEEPALIVE;
2458 		break;
2459         case TARGET_SO_OOBINLINE:
2460 		optname = SO_OOBINLINE;
2461 		break;
2462         case TARGET_SO_NO_CHECK:
2463 		optname = SO_NO_CHECK;
2464 		break;
2465         case TARGET_SO_PRIORITY:
2466 		optname = SO_PRIORITY;
2467 		break;
2468 #ifdef SO_BSDCOMPAT
2469         case TARGET_SO_BSDCOMPAT:
2470 		optname = SO_BSDCOMPAT;
2471 		break;
2472 #endif
2473         case TARGET_SO_PASSCRED:
2474 		optname = SO_PASSCRED;
2475 		break;
2476         case TARGET_SO_PASSSEC:
2477                 optname = SO_PASSSEC;
2478                 break;
2479         case TARGET_SO_TIMESTAMP:
2480 		optname = SO_TIMESTAMP;
2481 		break;
2482         case TARGET_SO_RCVLOWAT:
2483 		optname = SO_RCVLOWAT;
2484 		break;
2485         default:
2486             goto unimplemented;
2487         }
2488 	if (optlen < sizeof(uint32_t))
2489             return -TARGET_EINVAL;
2490 
2491 	if (get_user_u32(val, optval_addr))
2492             return -TARGET_EFAULT;
2493 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2494         break;
2495 #ifdef SOL_NETLINK
2496     case SOL_NETLINK:
2497         switch (optname) {
2498         case NETLINK_PKTINFO:
2499         case NETLINK_ADD_MEMBERSHIP:
2500         case NETLINK_DROP_MEMBERSHIP:
2501         case NETLINK_BROADCAST_ERROR:
2502         case NETLINK_NO_ENOBUFS:
2503 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2504         case NETLINK_LISTEN_ALL_NSID:
2505         case NETLINK_CAP_ACK:
2506 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2507 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2508         case NETLINK_EXT_ACK:
2509 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2510 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2511         case NETLINK_GET_STRICT_CHK:
2512 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2513             break;
2514         default:
2515             goto unimplemented;
2516         }
2517         val = 0;
2518         if (optlen < sizeof(uint32_t)) {
2519             return -TARGET_EINVAL;
2520         }
2521         if (get_user_u32(val, optval_addr)) {
2522             return -TARGET_EFAULT;
2523         }
2524         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2525                                    sizeof(val)));
2526         break;
2527 #endif /* SOL_NETLINK */
2528     default:
2529     unimplemented:
2530         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2531                       level, optname);
2532         ret = -TARGET_ENOPROTOOPT;
2533     }
2534     return ret;
2535 }
2536 
2537 /* do_getsockopt() Must return target values and target errnos. */
2538 static abi_long do_getsockopt(int sockfd, int level, int optname,
2539                               abi_ulong optval_addr, abi_ulong optlen)
2540 {
2541     abi_long ret;
2542     int len, val;
2543     socklen_t lv;
2544 
2545     switch(level) {
2546     case TARGET_SOL_SOCKET:
2547         level = SOL_SOCKET;
2548         switch (optname) {
2549         /* These don't just return a single integer */
2550         case TARGET_SO_PEERNAME:
2551             goto unimplemented;
2552         case TARGET_SO_RCVTIMEO: {
2553             struct timeval tv;
2554             socklen_t tvlen;
2555 
2556             optname = SO_RCVTIMEO;
2557 
2558 get_timeout:
2559             if (get_user_u32(len, optlen)) {
2560                 return -TARGET_EFAULT;
2561             }
2562             if (len < 0) {
2563                 return -TARGET_EINVAL;
2564             }
2565 
2566             tvlen = sizeof(tv);
2567             ret = get_errno(getsockopt(sockfd, level, optname,
2568                                        &tv, &tvlen));
2569             if (ret < 0) {
2570                 return ret;
2571             }
2572             if (len > sizeof(struct target_timeval)) {
2573                 len = sizeof(struct target_timeval);
2574             }
2575             if (copy_to_user_timeval(optval_addr, &tv)) {
2576                 return -TARGET_EFAULT;
2577             }
2578             if (put_user_u32(len, optlen)) {
2579                 return -TARGET_EFAULT;
2580             }
2581             break;
2582         }
2583         case TARGET_SO_SNDTIMEO:
2584             optname = SO_SNDTIMEO;
2585             goto get_timeout;
2586         case TARGET_SO_PEERCRED: {
2587             struct ucred cr;
2588             socklen_t crlen;
2589             struct target_ucred *tcr;
2590 
2591             if (get_user_u32(len, optlen)) {
2592                 return -TARGET_EFAULT;
2593             }
2594             if (len < 0) {
2595                 return -TARGET_EINVAL;
2596             }
2597 
2598             crlen = sizeof(cr);
2599             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2600                                        &cr, &crlen));
2601             if (ret < 0) {
2602                 return ret;
2603             }
2604             if (len > crlen) {
2605                 len = crlen;
2606             }
2607             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2608                 return -TARGET_EFAULT;
2609             }
2610             __put_user(cr.pid, &tcr->pid);
2611             __put_user(cr.uid, &tcr->uid);
2612             __put_user(cr.gid, &tcr->gid);
2613             unlock_user_struct(tcr, optval_addr, 1);
2614             if (put_user_u32(len, optlen)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             break;
2618         }
2619         case TARGET_SO_PEERSEC: {
2620             char *name;
2621 
2622             if (get_user_u32(len, optlen)) {
2623                 return -TARGET_EFAULT;
2624             }
2625             if (len < 0) {
2626                 return -TARGET_EINVAL;
2627             }
2628             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2629             if (!name) {
2630                 return -TARGET_EFAULT;
2631             }
2632             lv = len;
2633             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2634                                        name, &lv));
2635             if (put_user_u32(lv, optlen)) {
2636                 ret = -TARGET_EFAULT;
2637             }
2638             unlock_user(name, optval_addr, lv);
2639             break;
2640         }
2641         case TARGET_SO_LINGER:
2642         {
2643             struct linger lg;
2644             socklen_t lglen;
2645             struct target_linger *tlg;
2646 
2647             if (get_user_u32(len, optlen)) {
2648                 return -TARGET_EFAULT;
2649             }
2650             if (len < 0) {
2651                 return -TARGET_EINVAL;
2652             }
2653 
2654             lglen = sizeof(lg);
2655             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2656                                        &lg, &lglen));
2657             if (ret < 0) {
2658                 return ret;
2659             }
2660             if (len > lglen) {
2661                 len = lglen;
2662             }
2663             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2664                 return -TARGET_EFAULT;
2665             }
2666             __put_user(lg.l_onoff, &tlg->l_onoff);
2667             __put_user(lg.l_linger, &tlg->l_linger);
2668             unlock_user_struct(tlg, optval_addr, 1);
2669             if (put_user_u32(len, optlen)) {
2670                 return -TARGET_EFAULT;
2671             }
2672             break;
2673         }
2674         /* Options with 'int' argument.  */
2675         case TARGET_SO_DEBUG:
2676             optname = SO_DEBUG;
2677             goto int_case;
2678         case TARGET_SO_REUSEADDR:
2679             optname = SO_REUSEADDR;
2680             goto int_case;
2681 #ifdef SO_REUSEPORT
2682         case TARGET_SO_REUSEPORT:
2683             optname = SO_REUSEPORT;
2684             goto int_case;
2685 #endif
2686         case TARGET_SO_TYPE:
2687             optname = SO_TYPE;
2688             goto int_case;
2689         case TARGET_SO_ERROR:
2690             optname = SO_ERROR;
2691             goto int_case;
2692         case TARGET_SO_DONTROUTE:
2693             optname = SO_DONTROUTE;
2694             goto int_case;
2695         case TARGET_SO_BROADCAST:
2696             optname = SO_BROADCAST;
2697             goto int_case;
2698         case TARGET_SO_SNDBUF:
2699             optname = SO_SNDBUF;
2700             goto int_case;
2701         case TARGET_SO_RCVBUF:
2702             optname = SO_RCVBUF;
2703             goto int_case;
2704         case TARGET_SO_KEEPALIVE:
2705             optname = SO_KEEPALIVE;
2706             goto int_case;
2707         case TARGET_SO_OOBINLINE:
2708             optname = SO_OOBINLINE;
2709             goto int_case;
2710         case TARGET_SO_NO_CHECK:
2711             optname = SO_NO_CHECK;
2712             goto int_case;
2713         case TARGET_SO_PRIORITY:
2714             optname = SO_PRIORITY;
2715             goto int_case;
2716 #ifdef SO_BSDCOMPAT
2717         case TARGET_SO_BSDCOMPAT:
2718             optname = SO_BSDCOMPAT;
2719             goto int_case;
2720 #endif
2721         case TARGET_SO_PASSCRED:
2722             optname = SO_PASSCRED;
2723             goto int_case;
2724         case TARGET_SO_TIMESTAMP:
2725             optname = SO_TIMESTAMP;
2726             goto int_case;
2727         case TARGET_SO_RCVLOWAT:
2728             optname = SO_RCVLOWAT;
2729             goto int_case;
2730         case TARGET_SO_ACCEPTCONN:
2731             optname = SO_ACCEPTCONN;
2732             goto int_case;
2733         case TARGET_SO_PROTOCOL:
2734             optname = SO_PROTOCOL;
2735             goto int_case;
2736         case TARGET_SO_DOMAIN:
2737             optname = SO_DOMAIN;
2738             goto int_case;
2739         default:
2740             goto int_case;
2741         }
2742         break;
2743     case SOL_TCP:
2744     case SOL_UDP:
2745         /* TCP and UDP options all take an 'int' value.  */
2746     int_case:
2747         if (get_user_u32(len, optlen))
2748             return -TARGET_EFAULT;
2749         if (len < 0)
2750             return -TARGET_EINVAL;
2751         lv = sizeof(lv);
2752         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2753         if (ret < 0)
2754             return ret;
2755         if (optname == SO_TYPE) {
2756             val = host_to_target_sock_type(val);
2757         }
2758         if (len > lv)
2759             len = lv;
2760         if (len == 4) {
2761             if (put_user_u32(val, optval_addr))
2762                 return -TARGET_EFAULT;
2763         } else {
2764             if (put_user_u8(val, optval_addr))
2765                 return -TARGET_EFAULT;
2766         }
2767         if (put_user_u32(len, optlen))
2768             return -TARGET_EFAULT;
2769         break;
2770     case SOL_IP:
2771         switch(optname) {
2772         case IP_TOS:
2773         case IP_TTL:
2774         case IP_HDRINCL:
2775         case IP_ROUTER_ALERT:
2776         case IP_RECVOPTS:
2777         case IP_RETOPTS:
2778         case IP_PKTINFO:
2779         case IP_MTU_DISCOVER:
2780         case IP_RECVERR:
2781         case IP_RECVTOS:
2782 #ifdef IP_FREEBIND
2783         case IP_FREEBIND:
2784 #endif
2785         case IP_MULTICAST_TTL:
2786         case IP_MULTICAST_LOOP:
2787             if (get_user_u32(len, optlen))
2788                 return -TARGET_EFAULT;
2789             if (len < 0)
2790                 return -TARGET_EINVAL;
2791             lv = sizeof(lv);
2792             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2793             if (ret < 0)
2794                 return ret;
2795             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2796                 len = 1;
2797                 if (put_user_u32(len, optlen)
2798                     || put_user_u8(val, optval_addr))
2799                     return -TARGET_EFAULT;
2800             } else {
2801                 if (len > sizeof(int))
2802                     len = sizeof(int);
2803                 if (put_user_u32(len, optlen)
2804                     || put_user_u32(val, optval_addr))
2805                     return -TARGET_EFAULT;
2806             }
2807             break;
2808         default:
2809             ret = -TARGET_ENOPROTOOPT;
2810             break;
2811         }
2812         break;
2813     case SOL_IPV6:
2814         switch (optname) {
2815         case IPV6_MTU_DISCOVER:
2816         case IPV6_MTU:
2817         case IPV6_V6ONLY:
2818         case IPV6_RECVPKTINFO:
2819         case IPV6_UNICAST_HOPS:
2820         case IPV6_MULTICAST_HOPS:
2821         case IPV6_MULTICAST_LOOP:
2822         case IPV6_RECVERR:
2823         case IPV6_RECVHOPLIMIT:
2824         case IPV6_2292HOPLIMIT:
2825         case IPV6_CHECKSUM:
2826         case IPV6_ADDRFORM:
2827         case IPV6_2292PKTINFO:
2828         case IPV6_RECVTCLASS:
2829         case IPV6_RECVRTHDR:
2830         case IPV6_2292RTHDR:
2831         case IPV6_RECVHOPOPTS:
2832         case IPV6_2292HOPOPTS:
2833         case IPV6_RECVDSTOPTS:
2834         case IPV6_2292DSTOPTS:
2835         case IPV6_TCLASS:
2836         case IPV6_ADDR_PREFERENCES:
2837 #ifdef IPV6_RECVPATHMTU
2838         case IPV6_RECVPATHMTU:
2839 #endif
2840 #ifdef IPV6_TRANSPARENT
2841         case IPV6_TRANSPARENT:
2842 #endif
2843 #ifdef IPV6_FREEBIND
2844         case IPV6_FREEBIND:
2845 #endif
2846 #ifdef IPV6_RECVORIGDSTADDR
2847         case IPV6_RECVORIGDSTADDR:
2848 #endif
2849             if (get_user_u32(len, optlen))
2850                 return -TARGET_EFAULT;
2851             if (len < 0)
2852                 return -TARGET_EINVAL;
2853             lv = sizeof(lv);
2854             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2855             if (ret < 0)
2856                 return ret;
2857             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2858                 len = 1;
2859                 if (put_user_u32(len, optlen)
2860                     || put_user_u8(val, optval_addr))
2861                     return -TARGET_EFAULT;
2862             } else {
2863                 if (len > sizeof(int))
2864                     len = sizeof(int);
2865                 if (put_user_u32(len, optlen)
2866                     || put_user_u32(val, optval_addr))
2867                     return -TARGET_EFAULT;
2868             }
2869             break;
2870         default:
2871             ret = -TARGET_ENOPROTOOPT;
2872             break;
2873         }
2874         break;
2875 #ifdef SOL_NETLINK
2876     case SOL_NETLINK:
2877         switch (optname) {
2878         case NETLINK_PKTINFO:
2879         case NETLINK_BROADCAST_ERROR:
2880         case NETLINK_NO_ENOBUFS:
2881 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2882         case NETLINK_LISTEN_ALL_NSID:
2883         case NETLINK_CAP_ACK:
2884 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2885 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2886         case NETLINK_EXT_ACK:
2887 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2888 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2889         case NETLINK_GET_STRICT_CHK:
2890 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2891             if (get_user_u32(len, optlen)) {
2892                 return -TARGET_EFAULT;
2893             }
2894             if (len != sizeof(val)) {
2895                 return -TARGET_EINVAL;
2896             }
2897             lv = len;
2898             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2899             if (ret < 0) {
2900                 return ret;
2901             }
2902             if (put_user_u32(lv, optlen)
2903                 || put_user_u32(val, optval_addr)) {
2904                 return -TARGET_EFAULT;
2905             }
2906             break;
2907 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2908         case NETLINK_LIST_MEMBERSHIPS:
2909         {
2910             uint32_t *results;
2911             int i;
2912             if (get_user_u32(len, optlen)) {
2913                 return -TARGET_EFAULT;
2914             }
2915             if (len < 0) {
2916                 return -TARGET_EINVAL;
2917             }
2918             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2919             if (!results && len > 0) {
2920                 return -TARGET_EFAULT;
2921             }
2922             lv = len;
2923             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2924             if (ret < 0) {
2925                 unlock_user(results, optval_addr, 0);
2926                 return ret;
2927             }
2928             /* swap host endianess to target endianess. */
2929             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2930                 results[i] = tswap32(results[i]);
2931             }
2932             if (put_user_u32(lv, optlen)) {
2933                 return -TARGET_EFAULT;
2934             }
2935             unlock_user(results, optval_addr, 0);
2936             break;
2937         }
2938 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2939         default:
2940             goto unimplemented;
2941         }
2942         break;
2943 #endif /* SOL_NETLINK */
2944     default:
2945     unimplemented:
2946         qemu_log_mask(LOG_UNIMP,
2947                       "getsockopt level=%d optname=%d not yet supported\n",
2948                       level, optname);
2949         ret = -TARGET_EOPNOTSUPP;
2950         break;
2951     }
2952     return ret;
2953 }
2954 
2955 /* Convert target low/high pair representing file offset into the host
2956  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2957  * as the kernel doesn't handle them either.
2958  */
2959 static void target_to_host_low_high(abi_ulong tlow,
2960                                     abi_ulong thigh,
2961                                     unsigned long *hlow,
2962                                     unsigned long *hhigh)
2963 {
2964     uint64_t off = tlow |
2965         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2966         TARGET_LONG_BITS / 2;
2967 
2968     *hlow = off;
2969     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2970 }
2971 
2972 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2973                                 abi_ulong count, int copy)
2974 {
2975     struct target_iovec *target_vec;
2976     struct iovec *vec;
2977     abi_ulong total_len, max_len;
2978     int i;
2979     int err = 0;
2980     bool bad_address = false;
2981 
2982     if (count == 0) {
2983         errno = 0;
2984         return NULL;
2985     }
2986     if (count > IOV_MAX) {
2987         errno = EINVAL;
2988         return NULL;
2989     }
2990 
2991     vec = g_try_new0(struct iovec, count);
2992     if (vec == NULL) {
2993         errno = ENOMEM;
2994         return NULL;
2995     }
2996 
2997     target_vec = lock_user(VERIFY_READ, target_addr,
2998                            count * sizeof(struct target_iovec), 1);
2999     if (target_vec == NULL) {
3000         err = EFAULT;
3001         goto fail2;
3002     }
3003 
3004     /* ??? If host page size > target page size, this will result in a
3005        value larger than what we can actually support.  */
3006     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3007     total_len = 0;
3008 
3009     for (i = 0; i < count; i++) {
3010         abi_ulong base = tswapal(target_vec[i].iov_base);
3011         abi_long len = tswapal(target_vec[i].iov_len);
3012 
3013         if (len < 0) {
3014             err = EINVAL;
3015             goto fail;
3016         } else if (len == 0) {
3017             /* Zero length pointer is ignored.  */
3018             vec[i].iov_base = 0;
3019         } else {
3020             vec[i].iov_base = lock_user(type, base, len, copy);
3021             /* If the first buffer pointer is bad, this is a fault.  But
3022              * subsequent bad buffers will result in a partial write; this
3023              * is realized by filling the vector with null pointers and
3024              * zero lengths. */
3025             if (!vec[i].iov_base) {
3026                 if (i == 0) {
3027                     err = EFAULT;
3028                     goto fail;
3029                 } else {
3030                     bad_address = true;
3031                 }
3032             }
3033             if (bad_address) {
3034                 len = 0;
3035             }
3036             if (len > max_len - total_len) {
3037                 len = max_len - total_len;
3038             }
3039         }
3040         vec[i].iov_len = len;
3041         total_len += len;
3042     }
3043 
3044     unlock_user(target_vec, target_addr, 0);
3045     return vec;
3046 
3047  fail:
3048     while (--i >= 0) {
3049         if (tswapal(target_vec[i].iov_len) > 0) {
3050             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3051         }
3052     }
3053     unlock_user(target_vec, target_addr, 0);
3054  fail2:
3055     g_free(vec);
3056     errno = err;
3057     return NULL;
3058 }
3059 
3060 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3061                          abi_ulong count, int copy)
3062 {
3063     struct target_iovec *target_vec;
3064     int i;
3065 
3066     target_vec = lock_user(VERIFY_READ, target_addr,
3067                            count * sizeof(struct target_iovec), 1);
3068     if (target_vec) {
3069         for (i = 0; i < count; i++) {
3070             abi_ulong base = tswapal(target_vec[i].iov_base);
3071             abi_long len = tswapal(target_vec[i].iov_len);
3072             if (len < 0) {
3073                 break;
3074             }
3075             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3076         }
3077         unlock_user(target_vec, target_addr, 0);
3078     }
3079 
3080     g_free(vec);
3081 }
3082 
3083 static inline int target_to_host_sock_type(int *type)
3084 {
3085     int host_type = 0;
3086     int target_type = *type;
3087 
3088     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3089     case TARGET_SOCK_DGRAM:
3090         host_type = SOCK_DGRAM;
3091         break;
3092     case TARGET_SOCK_STREAM:
3093         host_type = SOCK_STREAM;
3094         break;
3095     default:
3096         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3097         break;
3098     }
3099     if (target_type & TARGET_SOCK_CLOEXEC) {
3100 #if defined(SOCK_CLOEXEC)
3101         host_type |= SOCK_CLOEXEC;
3102 #else
3103         return -TARGET_EINVAL;
3104 #endif
3105     }
3106     if (target_type & TARGET_SOCK_NONBLOCK) {
3107 #if defined(SOCK_NONBLOCK)
3108         host_type |= SOCK_NONBLOCK;
3109 #elif !defined(O_NONBLOCK)
3110         return -TARGET_EINVAL;
3111 #endif
3112     }
3113     *type = host_type;
3114     return 0;
3115 }
3116 
3117 /* Try to emulate socket type flags after socket creation.  */
3118 static int sock_flags_fixup(int fd, int target_type)
3119 {
3120 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3121     if (target_type & TARGET_SOCK_NONBLOCK) {
3122         int flags = fcntl(fd, F_GETFL);
3123         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3124             close(fd);
3125             return -TARGET_EINVAL;
3126         }
3127     }
3128 #endif
3129     return fd;
3130 }
3131 
3132 /* do_socket() Must return target values and target errnos. */
3133 static abi_long do_socket(int domain, int type, int protocol)
3134 {
3135     int target_type = type;
3136     int ret;
3137 
3138     ret = target_to_host_sock_type(&type);
3139     if (ret) {
3140         return ret;
3141     }
3142 
3143     if (domain == PF_NETLINK && !(
3144 #ifdef CONFIG_RTNETLINK
3145          protocol == NETLINK_ROUTE ||
3146 #endif
3147          protocol == NETLINK_KOBJECT_UEVENT ||
3148          protocol == NETLINK_AUDIT)) {
3149         return -TARGET_EPROTONOSUPPORT;
3150     }
3151 
3152     if (domain == AF_PACKET ||
3153         (domain == AF_INET && type == SOCK_PACKET)) {
3154         protocol = tswap16(protocol);
3155     }
3156 
3157     ret = get_errno(socket(domain, type, protocol));
3158     if (ret >= 0) {
3159         ret = sock_flags_fixup(ret, target_type);
3160         if (type == SOCK_PACKET) {
3161             /* Manage an obsolete case :
3162              * if socket type is SOCK_PACKET, bind by name
3163              */
3164             fd_trans_register(ret, &target_packet_trans);
3165         } else if (domain == PF_NETLINK) {
3166             switch (protocol) {
3167 #ifdef CONFIG_RTNETLINK
3168             case NETLINK_ROUTE:
3169                 fd_trans_register(ret, &target_netlink_route_trans);
3170                 break;
3171 #endif
3172             case NETLINK_KOBJECT_UEVENT:
3173                 /* nothing to do: messages are strings */
3174                 break;
3175             case NETLINK_AUDIT:
3176                 fd_trans_register(ret, &target_netlink_audit_trans);
3177                 break;
3178             default:
3179                 g_assert_not_reached();
3180             }
3181         }
3182     }
3183     return ret;
3184 }
3185 
3186 /* do_bind() Must return target values and target errnos. */
3187 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3188                         socklen_t addrlen)
3189 {
3190     void *addr;
3191     abi_long ret;
3192 
3193     if ((int)addrlen < 0) {
3194         return -TARGET_EINVAL;
3195     }
3196 
3197     addr = alloca(addrlen+1);
3198 
3199     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3200     if (ret)
3201         return ret;
3202 
3203     return get_errno(bind(sockfd, addr, addrlen));
3204 }
3205 
3206 /* do_connect() Must return target values and target errnos. */
3207 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3208                            socklen_t addrlen)
3209 {
3210     void *addr;
3211     abi_long ret;
3212 
3213     if ((int)addrlen < 0) {
3214         return -TARGET_EINVAL;
3215     }
3216 
3217     addr = alloca(addrlen+1);
3218 
3219     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3220     if (ret)
3221         return ret;
3222 
3223     return get_errno(safe_connect(sockfd, addr, addrlen));
3224 }
3225 
3226 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3227 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3228                                       int flags, int send)
3229 {
3230     abi_long ret, len;
3231     struct msghdr msg;
3232     abi_ulong count;
3233     struct iovec *vec;
3234     abi_ulong target_vec;
3235 
3236     if (msgp->msg_name) {
3237         msg.msg_namelen = tswap32(msgp->msg_namelen);
3238         msg.msg_name = alloca(msg.msg_namelen+1);
3239         ret = target_to_host_sockaddr(fd, msg.msg_name,
3240                                       tswapal(msgp->msg_name),
3241                                       msg.msg_namelen);
3242         if (ret == -TARGET_EFAULT) {
3243             /* For connected sockets msg_name and msg_namelen must
3244              * be ignored, so returning EFAULT immediately is wrong.
3245              * Instead, pass a bad msg_name to the host kernel, and
3246              * let it decide whether to return EFAULT or not.
3247              */
3248             msg.msg_name = (void *)-1;
3249         } else if (ret) {
3250             goto out2;
3251         }
3252     } else {
3253         msg.msg_name = NULL;
3254         msg.msg_namelen = 0;
3255     }
3256     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3257     msg.msg_control = alloca(msg.msg_controllen);
3258     memset(msg.msg_control, 0, msg.msg_controllen);
3259 
3260     msg.msg_flags = tswap32(msgp->msg_flags);
3261 
3262     count = tswapal(msgp->msg_iovlen);
3263     target_vec = tswapal(msgp->msg_iov);
3264 
3265     if (count > IOV_MAX) {
3266         /* sendrcvmsg returns a different errno for this condition than
3267          * readv/writev, so we must catch it here before lock_iovec() does.
3268          */
3269         ret = -TARGET_EMSGSIZE;
3270         goto out2;
3271     }
3272 
3273     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3274                      target_vec, count, send);
3275     if (vec == NULL) {
3276         ret = -host_to_target_errno(errno);
3277         goto out2;
3278     }
3279     msg.msg_iovlen = count;
3280     msg.msg_iov = vec;
3281 
3282     if (send) {
3283         if (fd_trans_target_to_host_data(fd)) {
3284             void *host_msg;
3285 
3286             host_msg = g_malloc(msg.msg_iov->iov_len);
3287             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3288             ret = fd_trans_target_to_host_data(fd)(host_msg,
3289                                                    msg.msg_iov->iov_len);
3290             if (ret >= 0) {
3291                 msg.msg_iov->iov_base = host_msg;
3292                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3293             }
3294             g_free(host_msg);
3295         } else {
3296             ret = target_to_host_cmsg(&msg, msgp);
3297             if (ret == 0) {
3298                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3299             }
3300         }
3301     } else {
3302         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3303         if (!is_error(ret)) {
3304             len = ret;
3305             if (fd_trans_host_to_target_data(fd)) {
3306                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3307                                                MIN(msg.msg_iov->iov_len, len));
3308             } else {
3309                 ret = host_to_target_cmsg(msgp, &msg);
3310             }
3311             if (!is_error(ret)) {
3312                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3313                 msgp->msg_flags = tswap32(msg.msg_flags);
3314                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3315                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3316                                     msg.msg_name, msg.msg_namelen);
3317                     if (ret) {
3318                         goto out;
3319                     }
3320                 }
3321 
3322                 ret = len;
3323             }
3324         }
3325     }
3326 
3327 out:
3328     unlock_iovec(vec, target_vec, count, !send);
3329 out2:
3330     return ret;
3331 }
3332 
3333 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3334                                int flags, int send)
3335 {
3336     abi_long ret;
3337     struct target_msghdr *msgp;
3338 
3339     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3340                           msgp,
3341                           target_msg,
3342                           send ? 1 : 0)) {
3343         return -TARGET_EFAULT;
3344     }
3345     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3346     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3347     return ret;
3348 }
3349 
3350 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3351  * so it might not have this *mmsg-specific flag either.
3352  */
3353 #ifndef MSG_WAITFORONE
3354 #define MSG_WAITFORONE 0x10000
3355 #endif
3356 
3357 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3358                                 unsigned int vlen, unsigned int flags,
3359                                 int send)
3360 {
3361     struct target_mmsghdr *mmsgp;
3362     abi_long ret = 0;
3363     int i;
3364 
3365     if (vlen > UIO_MAXIOV) {
3366         vlen = UIO_MAXIOV;
3367     }
3368 
3369     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3370     if (!mmsgp) {
3371         return -TARGET_EFAULT;
3372     }
3373 
3374     for (i = 0; i < vlen; i++) {
3375         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3376         if (is_error(ret)) {
3377             break;
3378         }
3379         mmsgp[i].msg_len = tswap32(ret);
3380         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3381         if (flags & MSG_WAITFORONE) {
3382             flags |= MSG_DONTWAIT;
3383         }
3384     }
3385 
3386     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3387 
3388     /* Return number of datagrams sent if we sent any at all;
3389      * otherwise return the error.
3390      */
3391     if (i) {
3392         return i;
3393     }
3394     return ret;
3395 }
3396 
3397 /* do_accept4() Must return target values and target errnos. */
3398 static abi_long do_accept4(int fd, abi_ulong target_addr,
3399                            abi_ulong target_addrlen_addr, int flags)
3400 {
3401     socklen_t addrlen, ret_addrlen;
3402     void *addr;
3403     abi_long ret;
3404     int host_flags;
3405 
3406     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3407 
3408     if (target_addr == 0) {
3409         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3410     }
3411 
3412     /* linux returns EFAULT if addrlen pointer is invalid */
3413     if (get_user_u32(addrlen, target_addrlen_addr))
3414         return -TARGET_EFAULT;
3415 
3416     if ((int)addrlen < 0) {
3417         return -TARGET_EINVAL;
3418     }
3419 
3420     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3421         return -TARGET_EFAULT;
3422     }
3423 
3424     addr = alloca(addrlen);
3425 
3426     ret_addrlen = addrlen;
3427     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3428     if (!is_error(ret)) {
3429         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3430         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3431             ret = -TARGET_EFAULT;
3432         }
3433     }
3434     return ret;
3435 }
3436 
3437 /* do_getpeername() Must return target values and target errnos. */
3438 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3439                                abi_ulong target_addrlen_addr)
3440 {
3441     socklen_t addrlen, ret_addrlen;
3442     void *addr;
3443     abi_long ret;
3444 
3445     if (get_user_u32(addrlen, target_addrlen_addr))
3446         return -TARGET_EFAULT;
3447 
3448     if ((int)addrlen < 0) {
3449         return -TARGET_EINVAL;
3450     }
3451 
3452     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3453         return -TARGET_EFAULT;
3454     }
3455 
3456     addr = alloca(addrlen);
3457 
3458     ret_addrlen = addrlen;
3459     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3460     if (!is_error(ret)) {
3461         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3462         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3463             ret = -TARGET_EFAULT;
3464         }
3465     }
3466     return ret;
3467 }
3468 
3469 /* do_getsockname() Must return target values and target errnos. */
3470 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3471                                abi_ulong target_addrlen_addr)
3472 {
3473     socklen_t addrlen, ret_addrlen;
3474     void *addr;
3475     abi_long ret;
3476 
3477     if (get_user_u32(addrlen, target_addrlen_addr))
3478         return -TARGET_EFAULT;
3479 
3480     if ((int)addrlen < 0) {
3481         return -TARGET_EINVAL;
3482     }
3483 
3484     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3485         return -TARGET_EFAULT;
3486     }
3487 
3488     addr = alloca(addrlen);
3489 
3490     ret_addrlen = addrlen;
3491     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3492     if (!is_error(ret)) {
3493         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3494         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3495             ret = -TARGET_EFAULT;
3496         }
3497     }
3498     return ret;
3499 }
3500 
3501 /* do_socketpair() Must return target values and target errnos. */
3502 static abi_long do_socketpair(int domain, int type, int protocol,
3503                               abi_ulong target_tab_addr)
3504 {
3505     int tab[2];
3506     abi_long ret;
3507 
3508     target_to_host_sock_type(&type);
3509 
3510     ret = get_errno(socketpair(domain, type, protocol, tab));
3511     if (!is_error(ret)) {
3512         if (put_user_s32(tab[0], target_tab_addr)
3513             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3514             ret = -TARGET_EFAULT;
3515     }
3516     return ret;
3517 }
3518 
3519 /* do_sendto() Must return target values and target errnos. */
3520 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3521                           abi_ulong target_addr, socklen_t addrlen)
3522 {
3523     void *addr;
3524     void *host_msg;
3525     void *copy_msg = NULL;
3526     abi_long ret;
3527 
3528     if ((int)addrlen < 0) {
3529         return -TARGET_EINVAL;
3530     }
3531 
3532     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3533     if (!host_msg)
3534         return -TARGET_EFAULT;
3535     if (fd_trans_target_to_host_data(fd)) {
3536         copy_msg = host_msg;
3537         host_msg = g_malloc(len);
3538         memcpy(host_msg, copy_msg, len);
3539         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3540         if (ret < 0) {
3541             goto fail;
3542         }
3543     }
3544     if (target_addr) {
3545         addr = alloca(addrlen+1);
3546         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3547         if (ret) {
3548             goto fail;
3549         }
3550         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3551     } else {
3552         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3553     }
3554 fail:
3555     if (copy_msg) {
3556         g_free(host_msg);
3557         host_msg = copy_msg;
3558     }
3559     unlock_user(host_msg, msg, 0);
3560     return ret;
3561 }
3562 
3563 /* do_recvfrom() Must return target values and target errnos. */
3564 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3565                             abi_ulong target_addr,
3566                             abi_ulong target_addrlen)
3567 {
3568     socklen_t addrlen, ret_addrlen;
3569     void *addr;
3570     void *host_msg;
3571     abi_long ret;
3572 
3573     if (!msg) {
3574         host_msg = NULL;
3575     } else {
3576         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3577         if (!host_msg) {
3578             return -TARGET_EFAULT;
3579         }
3580     }
3581     if (target_addr) {
3582         if (get_user_u32(addrlen, target_addrlen)) {
3583             ret = -TARGET_EFAULT;
3584             goto fail;
3585         }
3586         if ((int)addrlen < 0) {
3587             ret = -TARGET_EINVAL;
3588             goto fail;
3589         }
3590         addr = alloca(addrlen);
3591         ret_addrlen = addrlen;
3592         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3593                                       addr, &ret_addrlen));
3594     } else {
3595         addr = NULL; /* To keep compiler quiet.  */
3596         addrlen = 0; /* To keep compiler quiet.  */
3597         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3598     }
3599     if (!is_error(ret)) {
3600         if (fd_trans_host_to_target_data(fd)) {
3601             abi_long trans;
3602             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3603             if (is_error(trans)) {
3604                 ret = trans;
3605                 goto fail;
3606             }
3607         }
3608         if (target_addr) {
3609             host_to_target_sockaddr(target_addr, addr,
3610                                     MIN(addrlen, ret_addrlen));
3611             if (put_user_u32(ret_addrlen, target_addrlen)) {
3612                 ret = -TARGET_EFAULT;
3613                 goto fail;
3614             }
3615         }
3616         unlock_user(host_msg, msg, len);
3617     } else {
3618 fail:
3619         unlock_user(host_msg, msg, 0);
3620     }
3621     return ret;
3622 }
3623 
3624 #ifdef TARGET_NR_socketcall
3625 /* do_socketcall() must return target values and target errnos. */
3626 static abi_long do_socketcall(int num, abi_ulong vptr)
3627 {
3628     static const unsigned nargs[] = { /* number of arguments per operation */
3629         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3630         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3631         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3632         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3633         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3634         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3635         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3636         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3637         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3638         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3639         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3640         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3641         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3642         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3643         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3644         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3645         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3646         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3647         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3648         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3649     };
3650     abi_long a[6]; /* max 6 args */
3651     unsigned i;
3652 
3653     /* check the range of the first argument num */
3654     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3655     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3656         return -TARGET_EINVAL;
3657     }
3658     /* ensure we have space for args */
3659     if (nargs[num] > ARRAY_SIZE(a)) {
3660         return -TARGET_EINVAL;
3661     }
3662     /* collect the arguments in a[] according to nargs[] */
3663     for (i = 0; i < nargs[num]; ++i) {
3664         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3665             return -TARGET_EFAULT;
3666         }
3667     }
3668     /* now when we have the args, invoke the appropriate underlying function */
3669     switch (num) {
3670     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3671         return do_socket(a[0], a[1], a[2]);
3672     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3673         return do_bind(a[0], a[1], a[2]);
3674     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3675         return do_connect(a[0], a[1], a[2]);
3676     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3677         return get_errno(listen(a[0], a[1]));
3678     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3679         return do_accept4(a[0], a[1], a[2], 0);
3680     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3681         return do_getsockname(a[0], a[1], a[2]);
3682     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3683         return do_getpeername(a[0], a[1], a[2]);
3684     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3685         return do_socketpair(a[0], a[1], a[2], a[3]);
3686     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3687         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3688     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3689         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3690     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3691         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3692     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3693         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3694     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3695         return get_errno(shutdown(a[0], a[1]));
3696     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3697         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3698     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3699         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3700     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3701         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3702     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3703         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3704     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3705         return do_accept4(a[0], a[1], a[2], a[3]);
3706     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3707         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3708     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3709         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3710     default:
3711         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3712         return -TARGET_EINVAL;
3713     }
3714 }
3715 #endif
3716 
3717 #define N_SHM_REGIONS	32
3718 
3719 static struct shm_region {
3720     abi_ulong start;
3721     abi_ulong size;
3722     bool in_use;
3723 } shm_regions[N_SHM_REGIONS];
3724 
3725 #ifndef TARGET_SEMID64_DS
3726 /* asm-generic version of this struct */
3727 struct target_semid64_ds
3728 {
3729   struct target_ipc_perm sem_perm;
3730   abi_ulong sem_otime;
3731 #if TARGET_ABI_BITS == 32
3732   abi_ulong __unused1;
3733 #endif
3734   abi_ulong sem_ctime;
3735 #if TARGET_ABI_BITS == 32
3736   abi_ulong __unused2;
3737 #endif
3738   abi_ulong sem_nsems;
3739   abi_ulong __unused3;
3740   abi_ulong __unused4;
3741 };
3742 #endif
3743 
3744 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3745                                                abi_ulong target_addr)
3746 {
3747     struct target_ipc_perm *target_ip;
3748     struct target_semid64_ds *target_sd;
3749 
3750     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3751         return -TARGET_EFAULT;
3752     target_ip = &(target_sd->sem_perm);
3753     host_ip->__key = tswap32(target_ip->__key);
3754     host_ip->uid = tswap32(target_ip->uid);
3755     host_ip->gid = tswap32(target_ip->gid);
3756     host_ip->cuid = tswap32(target_ip->cuid);
3757     host_ip->cgid = tswap32(target_ip->cgid);
3758 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3759     host_ip->mode = tswap32(target_ip->mode);
3760 #else
3761     host_ip->mode = tswap16(target_ip->mode);
3762 #endif
3763 #if defined(TARGET_PPC)
3764     host_ip->__seq = tswap32(target_ip->__seq);
3765 #else
3766     host_ip->__seq = tswap16(target_ip->__seq);
3767 #endif
3768     unlock_user_struct(target_sd, target_addr, 0);
3769     return 0;
3770 }
3771 
3772 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3773                                                struct ipc_perm *host_ip)
3774 {
3775     struct target_ipc_perm *target_ip;
3776     struct target_semid64_ds *target_sd;
3777 
3778     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3779         return -TARGET_EFAULT;
3780     target_ip = &(target_sd->sem_perm);
3781     target_ip->__key = tswap32(host_ip->__key);
3782     target_ip->uid = tswap32(host_ip->uid);
3783     target_ip->gid = tswap32(host_ip->gid);
3784     target_ip->cuid = tswap32(host_ip->cuid);
3785     target_ip->cgid = tswap32(host_ip->cgid);
3786 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3787     target_ip->mode = tswap32(host_ip->mode);
3788 #else
3789     target_ip->mode = tswap16(host_ip->mode);
3790 #endif
3791 #if defined(TARGET_PPC)
3792     target_ip->__seq = tswap32(host_ip->__seq);
3793 #else
3794     target_ip->__seq = tswap16(host_ip->__seq);
3795 #endif
3796     unlock_user_struct(target_sd, target_addr, 1);
3797     return 0;
3798 }
3799 
3800 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3801                                                abi_ulong target_addr)
3802 {
3803     struct target_semid64_ds *target_sd;
3804 
3805     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3806         return -TARGET_EFAULT;
3807     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3808         return -TARGET_EFAULT;
3809     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3810     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3811     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3812     unlock_user_struct(target_sd, target_addr, 0);
3813     return 0;
3814 }
3815 
3816 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3817                                                struct semid_ds *host_sd)
3818 {
3819     struct target_semid64_ds *target_sd;
3820 
3821     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3822         return -TARGET_EFAULT;
3823     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3824         return -TARGET_EFAULT;
3825     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3826     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3827     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3828     unlock_user_struct(target_sd, target_addr, 1);
3829     return 0;
3830 }
3831 
3832 struct target_seminfo {
3833     int semmap;
3834     int semmni;
3835     int semmns;
3836     int semmnu;
3837     int semmsl;
3838     int semopm;
3839     int semume;
3840     int semusz;
3841     int semvmx;
3842     int semaem;
3843 };
3844 
3845 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3846                                               struct seminfo *host_seminfo)
3847 {
3848     struct target_seminfo *target_seminfo;
3849     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3850         return -TARGET_EFAULT;
3851     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3852     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3853     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3854     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3855     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3856     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3857     __put_user(host_seminfo->semume, &target_seminfo->semume);
3858     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3859     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3860     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3861     unlock_user_struct(target_seminfo, target_addr, 1);
3862     return 0;
3863 }
3864 
3865 union semun {
3866 	int val;
3867 	struct semid_ds *buf;
3868 	unsigned short *array;
3869 	struct seminfo *__buf;
3870 };
3871 
3872 union target_semun {
3873 	int val;
3874 	abi_ulong buf;
3875 	abi_ulong array;
3876 	abi_ulong __buf;
3877 };
3878 
3879 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3880                                                abi_ulong target_addr)
3881 {
3882     int nsems;
3883     unsigned short *array;
3884     union semun semun;
3885     struct semid_ds semid_ds;
3886     int i, ret;
3887 
3888     semun.buf = &semid_ds;
3889 
3890     ret = semctl(semid, 0, IPC_STAT, semun);
3891     if (ret == -1)
3892         return get_errno(ret);
3893 
3894     nsems = semid_ds.sem_nsems;
3895 
3896     *host_array = g_try_new(unsigned short, nsems);
3897     if (!*host_array) {
3898         return -TARGET_ENOMEM;
3899     }
3900     array = lock_user(VERIFY_READ, target_addr,
3901                       nsems*sizeof(unsigned short), 1);
3902     if (!array) {
3903         g_free(*host_array);
3904         return -TARGET_EFAULT;
3905     }
3906 
3907     for(i=0; i<nsems; i++) {
3908         __get_user((*host_array)[i], &array[i]);
3909     }
3910     unlock_user(array, target_addr, 0);
3911 
3912     return 0;
3913 }
3914 
3915 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3916                                                unsigned short **host_array)
3917 {
3918     int nsems;
3919     unsigned short *array;
3920     union semun semun;
3921     struct semid_ds semid_ds;
3922     int i, ret;
3923 
3924     semun.buf = &semid_ds;
3925 
3926     ret = semctl(semid, 0, IPC_STAT, semun);
3927     if (ret == -1)
3928         return get_errno(ret);
3929 
3930     nsems = semid_ds.sem_nsems;
3931 
3932     array = lock_user(VERIFY_WRITE, target_addr,
3933                       nsems*sizeof(unsigned short), 0);
3934     if (!array)
3935         return -TARGET_EFAULT;
3936 
3937     for(i=0; i<nsems; i++) {
3938         __put_user((*host_array)[i], &array[i]);
3939     }
3940     g_free(*host_array);
3941     unlock_user(array, target_addr, 1);
3942 
3943     return 0;
3944 }
3945 
3946 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3947                                  abi_ulong target_arg)
3948 {
3949     union target_semun target_su = { .buf = target_arg };
3950     union semun arg;
3951     struct semid_ds dsarg;
3952     unsigned short *array = NULL;
3953     struct seminfo seminfo;
3954     abi_long ret = -TARGET_EINVAL;
3955     abi_long err;
3956     cmd &= 0xff;
3957 
3958     switch( cmd ) {
3959 	case GETVAL:
3960 	case SETVAL:
3961             /* In 64 bit cross-endian situations, we will erroneously pick up
3962              * the wrong half of the union for the "val" element.  To rectify
3963              * this, the entire 8-byte structure is byteswapped, followed by
3964 	     * a swap of the 4 byte val field. In other cases, the data is
3965 	     * already in proper host byte order. */
3966 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3967 		target_su.buf = tswapal(target_su.buf);
3968 		arg.val = tswap32(target_su.val);
3969 	    } else {
3970 		arg.val = target_su.val;
3971 	    }
3972             ret = get_errno(semctl(semid, semnum, cmd, arg));
3973             break;
3974 	case GETALL:
3975 	case SETALL:
3976             err = target_to_host_semarray(semid, &array, target_su.array);
3977             if (err)
3978                 return err;
3979             arg.array = array;
3980             ret = get_errno(semctl(semid, semnum, cmd, arg));
3981             err = host_to_target_semarray(semid, target_su.array, &array);
3982             if (err)
3983                 return err;
3984             break;
3985 	case IPC_STAT:
3986 	case IPC_SET:
3987 	case SEM_STAT:
3988             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3989             if (err)
3990                 return err;
3991             arg.buf = &dsarg;
3992             ret = get_errno(semctl(semid, semnum, cmd, arg));
3993             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3994             if (err)
3995                 return err;
3996             break;
3997 	case IPC_INFO:
3998 	case SEM_INFO:
3999             arg.__buf = &seminfo;
4000             ret = get_errno(semctl(semid, semnum, cmd, arg));
4001             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4002             if (err)
4003                 return err;
4004             break;
4005 	case IPC_RMID:
4006 	case GETPID:
4007 	case GETNCNT:
4008 	case GETZCNT:
4009             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4010             break;
4011     }
4012 
4013     return ret;
4014 }
4015 
4016 struct target_sembuf {
4017     unsigned short sem_num;
4018     short sem_op;
4019     short sem_flg;
4020 };
4021 
4022 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4023                                              abi_ulong target_addr,
4024                                              unsigned nsops)
4025 {
4026     struct target_sembuf *target_sembuf;
4027     int i;
4028 
4029     target_sembuf = lock_user(VERIFY_READ, target_addr,
4030                               nsops*sizeof(struct target_sembuf), 1);
4031     if (!target_sembuf)
4032         return -TARGET_EFAULT;
4033 
4034     for(i=0; i<nsops; i++) {
4035         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4036         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4037         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4038     }
4039 
4040     unlock_user(target_sembuf, target_addr, 0);
4041 
4042     return 0;
4043 }
4044 
4045 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4046     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4047 
4048 /*
4049  * This macro is required to handle the s390 variants, which passes the
4050  * arguments in a different order than default.
4051  */
4052 #ifdef __s390x__
4053 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4054   (__nsops), (__timeout), (__sops)
4055 #else
4056 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4057   (__nsops), 0, (__sops), (__timeout)
4058 #endif
4059 
4060 static inline abi_long do_semtimedop(int semid,
4061                                      abi_long ptr,
4062                                      unsigned nsops,
4063                                      abi_long timeout, bool time64)
4064 {
4065     struct sembuf *sops;
4066     struct timespec ts, *pts = NULL;
4067     abi_long ret;
4068 
4069     if (timeout) {
4070         pts = &ts;
4071         if (time64) {
4072             if (target_to_host_timespec64(pts, timeout)) {
4073                 return -TARGET_EFAULT;
4074             }
4075         } else {
4076             if (target_to_host_timespec(pts, timeout)) {
4077                 return -TARGET_EFAULT;
4078             }
4079         }
4080     }
4081 
4082     if (nsops > TARGET_SEMOPM) {
4083         return -TARGET_E2BIG;
4084     }
4085 
4086     sops = g_new(struct sembuf, nsops);
4087 
4088     if (target_to_host_sembuf(sops, ptr, nsops)) {
4089         g_free(sops);
4090         return -TARGET_EFAULT;
4091     }
4092 
4093     ret = -TARGET_ENOSYS;
4094 #ifdef __NR_semtimedop
4095     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4096 #endif
4097 #ifdef __NR_ipc
4098     if (ret == -TARGET_ENOSYS) {
4099         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4100                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4101     }
4102 #endif
4103     g_free(sops);
4104     return ret;
4105 }
4106 #endif
4107 
4108 struct target_msqid_ds
4109 {
4110     struct target_ipc_perm msg_perm;
4111     abi_ulong msg_stime;
4112 #if TARGET_ABI_BITS == 32
4113     abi_ulong __unused1;
4114 #endif
4115     abi_ulong msg_rtime;
4116 #if TARGET_ABI_BITS == 32
4117     abi_ulong __unused2;
4118 #endif
4119     abi_ulong msg_ctime;
4120 #if TARGET_ABI_BITS == 32
4121     abi_ulong __unused3;
4122 #endif
4123     abi_ulong __msg_cbytes;
4124     abi_ulong msg_qnum;
4125     abi_ulong msg_qbytes;
4126     abi_ulong msg_lspid;
4127     abi_ulong msg_lrpid;
4128     abi_ulong __unused4;
4129     abi_ulong __unused5;
4130 };
4131 
4132 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4133                                                abi_ulong target_addr)
4134 {
4135     struct target_msqid_ds *target_md;
4136 
4137     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4138         return -TARGET_EFAULT;
4139     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4140         return -TARGET_EFAULT;
4141     host_md->msg_stime = tswapal(target_md->msg_stime);
4142     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4143     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4144     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4145     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4146     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4147     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4148     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4149     unlock_user_struct(target_md, target_addr, 0);
4150     return 0;
4151 }
4152 
4153 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4154                                                struct msqid_ds *host_md)
4155 {
4156     struct target_msqid_ds *target_md;
4157 
4158     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4159         return -TARGET_EFAULT;
4160     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4161         return -TARGET_EFAULT;
4162     target_md->msg_stime = tswapal(host_md->msg_stime);
4163     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4164     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4165     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4166     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4167     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4168     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4169     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4170     unlock_user_struct(target_md, target_addr, 1);
4171     return 0;
4172 }
4173 
4174 struct target_msginfo {
4175     int msgpool;
4176     int msgmap;
4177     int msgmax;
4178     int msgmnb;
4179     int msgmni;
4180     int msgssz;
4181     int msgtql;
4182     unsigned short int msgseg;
4183 };
4184 
4185 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4186                                               struct msginfo *host_msginfo)
4187 {
4188     struct target_msginfo *target_msginfo;
4189     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4190         return -TARGET_EFAULT;
4191     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4192     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4193     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4194     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4195     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4196     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4197     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4198     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4199     unlock_user_struct(target_msginfo, target_addr, 1);
4200     return 0;
4201 }
4202 
4203 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4204 {
4205     struct msqid_ds dsarg;
4206     struct msginfo msginfo;
4207     abi_long ret = -TARGET_EINVAL;
4208 
4209     cmd &= 0xff;
4210 
4211     switch (cmd) {
4212     case IPC_STAT:
4213     case IPC_SET:
4214     case MSG_STAT:
4215         if (target_to_host_msqid_ds(&dsarg,ptr))
4216             return -TARGET_EFAULT;
4217         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4218         if (host_to_target_msqid_ds(ptr,&dsarg))
4219             return -TARGET_EFAULT;
4220         break;
4221     case IPC_RMID:
4222         ret = get_errno(msgctl(msgid, cmd, NULL));
4223         break;
4224     case IPC_INFO:
4225     case MSG_INFO:
4226         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4227         if (host_to_target_msginfo(ptr, &msginfo))
4228             return -TARGET_EFAULT;
4229         break;
4230     }
4231 
4232     return ret;
4233 }
4234 
4235 struct target_msgbuf {
4236     abi_long mtype;
4237     char	mtext[1];
4238 };
4239 
4240 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4241                                  ssize_t msgsz, int msgflg)
4242 {
4243     struct target_msgbuf *target_mb;
4244     struct msgbuf *host_mb;
4245     abi_long ret = 0;
4246 
4247     if (msgsz < 0) {
4248         return -TARGET_EINVAL;
4249     }
4250 
4251     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4252         return -TARGET_EFAULT;
4253     host_mb = g_try_malloc(msgsz + sizeof(long));
4254     if (!host_mb) {
4255         unlock_user_struct(target_mb, msgp, 0);
4256         return -TARGET_ENOMEM;
4257     }
4258     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4259     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4260     ret = -TARGET_ENOSYS;
4261 #ifdef __NR_msgsnd
4262     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4263 #endif
4264 #ifdef __NR_ipc
4265     if (ret == -TARGET_ENOSYS) {
4266 #ifdef __s390x__
4267         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4268                                  host_mb));
4269 #else
4270         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4271                                  host_mb, 0));
4272 #endif
4273     }
4274 #endif
4275     g_free(host_mb);
4276     unlock_user_struct(target_mb, msgp, 0);
4277 
4278     return ret;
4279 }
4280 
4281 #ifdef __NR_ipc
4282 #if defined(__sparc__)
4283 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4284 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4285 #elif defined(__s390x__)
4286 /* The s390 sys_ipc variant has only five parameters.  */
4287 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4288     ((long int[]){(long int)__msgp, __msgtyp})
4289 #else
4290 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4291     ((long int[]){(long int)__msgp, __msgtyp}), 0
4292 #endif
4293 #endif
4294 
4295 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4296                                  ssize_t msgsz, abi_long msgtyp,
4297                                  int msgflg)
4298 {
4299     struct target_msgbuf *target_mb;
4300     char *target_mtext;
4301     struct msgbuf *host_mb;
4302     abi_long ret = 0;
4303 
4304     if (msgsz < 0) {
4305         return -TARGET_EINVAL;
4306     }
4307 
4308     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4309         return -TARGET_EFAULT;
4310 
4311     host_mb = g_try_malloc(msgsz + sizeof(long));
4312     if (!host_mb) {
4313         ret = -TARGET_ENOMEM;
4314         goto end;
4315     }
4316     ret = -TARGET_ENOSYS;
4317 #ifdef __NR_msgrcv
4318     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4319 #endif
4320 #ifdef __NR_ipc
4321     if (ret == -TARGET_ENOSYS) {
4322         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4323                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4324     }
4325 #endif
4326 
4327     if (ret > 0) {
4328         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4329         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4330         if (!target_mtext) {
4331             ret = -TARGET_EFAULT;
4332             goto end;
4333         }
4334         memcpy(target_mb->mtext, host_mb->mtext, ret);
4335         unlock_user(target_mtext, target_mtext_addr, ret);
4336     }
4337 
4338     target_mb->mtype = tswapal(host_mb->mtype);
4339 
4340 end:
4341     if (target_mb)
4342         unlock_user_struct(target_mb, msgp, 1);
4343     g_free(host_mb);
4344     return ret;
4345 }
4346 
4347 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4348                                                abi_ulong target_addr)
4349 {
4350     struct target_shmid_ds *target_sd;
4351 
4352     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4353         return -TARGET_EFAULT;
4354     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4355         return -TARGET_EFAULT;
4356     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4357     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4358     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4359     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4360     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4361     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4362     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4363     unlock_user_struct(target_sd, target_addr, 0);
4364     return 0;
4365 }
4366 
4367 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4368                                                struct shmid_ds *host_sd)
4369 {
4370     struct target_shmid_ds *target_sd;
4371 
4372     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4373         return -TARGET_EFAULT;
4374     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4375         return -TARGET_EFAULT;
4376     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4377     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4378     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4379     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4380     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4381     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4382     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4383     unlock_user_struct(target_sd, target_addr, 1);
4384     return 0;
4385 }
4386 
4387 struct  target_shminfo {
4388     abi_ulong shmmax;
4389     abi_ulong shmmin;
4390     abi_ulong shmmni;
4391     abi_ulong shmseg;
4392     abi_ulong shmall;
4393 };
4394 
4395 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4396                                               struct shminfo *host_shminfo)
4397 {
4398     struct target_shminfo *target_shminfo;
4399     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4400         return -TARGET_EFAULT;
4401     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4402     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4403     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4404     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4405     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4406     unlock_user_struct(target_shminfo, target_addr, 1);
4407     return 0;
4408 }
4409 
4410 struct target_shm_info {
4411     int used_ids;
4412     abi_ulong shm_tot;
4413     abi_ulong shm_rss;
4414     abi_ulong shm_swp;
4415     abi_ulong swap_attempts;
4416     abi_ulong swap_successes;
4417 };
4418 
4419 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4420                                                struct shm_info *host_shm_info)
4421 {
4422     struct target_shm_info *target_shm_info;
4423     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4424         return -TARGET_EFAULT;
4425     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4426     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4427     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4428     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4429     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4430     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4431     unlock_user_struct(target_shm_info, target_addr, 1);
4432     return 0;
4433 }
4434 
4435 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4436 {
4437     struct shmid_ds dsarg;
4438     struct shminfo shminfo;
4439     struct shm_info shm_info;
4440     abi_long ret = -TARGET_EINVAL;
4441 
4442     cmd &= 0xff;
4443 
4444     switch(cmd) {
4445     case IPC_STAT:
4446     case IPC_SET:
4447     case SHM_STAT:
4448         if (target_to_host_shmid_ds(&dsarg, buf))
4449             return -TARGET_EFAULT;
4450         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4451         if (host_to_target_shmid_ds(buf, &dsarg))
4452             return -TARGET_EFAULT;
4453         break;
4454     case IPC_INFO:
4455         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4456         if (host_to_target_shminfo(buf, &shminfo))
4457             return -TARGET_EFAULT;
4458         break;
4459     case SHM_INFO:
4460         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4461         if (host_to_target_shm_info(buf, &shm_info))
4462             return -TARGET_EFAULT;
4463         break;
4464     case IPC_RMID:
4465     case SHM_LOCK:
4466     case SHM_UNLOCK:
4467         ret = get_errno(shmctl(shmid, cmd, NULL));
4468         break;
4469     }
4470 
4471     return ret;
4472 }
4473 
4474 #ifndef TARGET_FORCE_SHMLBA
4475 /* For most architectures, SHMLBA is the same as the page size;
4476  * some architectures have larger values, in which case they should
4477  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4478  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4479  * and defining its own value for SHMLBA.
4480  *
4481  * The kernel also permits SHMLBA to be set by the architecture to a
4482  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4483  * this means that addresses are rounded to the large size if
4484  * SHM_RND is set but addresses not aligned to that size are not rejected
4485  * as long as they are at least page-aligned. Since the only architecture
4486  * which uses this is ia64 this code doesn't provide for that oddity.
4487  */
4488 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4489 {
4490     return TARGET_PAGE_SIZE;
4491 }
4492 #endif
4493 
4494 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4495                                  int shmid, abi_ulong shmaddr, int shmflg)
4496 {
4497     CPUState *cpu = env_cpu(cpu_env);
4498     abi_long raddr;
4499     void *host_raddr;
4500     struct shmid_ds shm_info;
4501     int i,ret;
4502     abi_ulong shmlba;
4503 
4504     /* shmat pointers are always untagged */
4505 
4506     /* find out the length of the shared memory segment */
4507     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4508     if (is_error(ret)) {
4509         /* can't get length, bail out */
4510         return ret;
4511     }
4512 
4513     shmlba = target_shmlba(cpu_env);
4514 
4515     if (shmaddr & (shmlba - 1)) {
4516         if (shmflg & SHM_RND) {
4517             shmaddr &= ~(shmlba - 1);
4518         } else {
4519             return -TARGET_EINVAL;
4520         }
4521     }
4522     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4523         return -TARGET_EINVAL;
4524     }
4525 
4526     mmap_lock();
4527 
4528     /*
4529      * We're mapping shared memory, so ensure we generate code for parallel
4530      * execution and flush old translations.  This will work up to the level
4531      * supported by the host -- anything that requires EXCP_ATOMIC will not
4532      * be atomic with respect to an external process.
4533      */
4534     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4535         cpu->tcg_cflags |= CF_PARALLEL;
4536         tb_flush(cpu);
4537     }
4538 
4539     if (shmaddr)
4540         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4541     else {
4542         abi_ulong mmap_start;
4543 
4544         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4545         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4546 
4547         if (mmap_start == -1) {
4548             errno = ENOMEM;
4549             host_raddr = (void *)-1;
4550         } else
4551             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4552                                shmflg | SHM_REMAP);
4553     }
4554 
4555     if (host_raddr == (void *)-1) {
4556         mmap_unlock();
4557         return get_errno((long)host_raddr);
4558     }
4559     raddr=h2g((unsigned long)host_raddr);
4560 
4561     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4562                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4563                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4564 
4565     for (i = 0; i < N_SHM_REGIONS; i++) {
4566         if (!shm_regions[i].in_use) {
4567             shm_regions[i].in_use = true;
4568             shm_regions[i].start = raddr;
4569             shm_regions[i].size = shm_info.shm_segsz;
4570             break;
4571         }
4572     }
4573 
4574     mmap_unlock();
4575     return raddr;
4576 
4577 }
4578 
4579 static inline abi_long do_shmdt(abi_ulong shmaddr)
4580 {
4581     int i;
4582     abi_long rv;
4583 
4584     /* shmdt pointers are always untagged */
4585 
4586     mmap_lock();
4587 
4588     for (i = 0; i < N_SHM_REGIONS; ++i) {
4589         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4590             shm_regions[i].in_use = false;
4591             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4592             break;
4593         }
4594     }
4595     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4596 
4597     mmap_unlock();
4598 
4599     return rv;
4600 }
4601 
4602 #ifdef TARGET_NR_ipc
4603 /* ??? This only works with linear mappings.  */
4604 /* do_ipc() must return target values and target errnos. */
4605 static abi_long do_ipc(CPUArchState *cpu_env,
4606                        unsigned int call, abi_long first,
4607                        abi_long second, abi_long third,
4608                        abi_long ptr, abi_long fifth)
4609 {
4610     int version;
4611     abi_long ret = 0;
4612 
4613     version = call >> 16;
4614     call &= 0xffff;
4615 
4616     switch (call) {
4617     case IPCOP_semop:
4618         ret = do_semtimedop(first, ptr, second, 0, false);
4619         break;
4620     case IPCOP_semtimedop:
4621     /*
4622      * The s390 sys_ipc variant has only five parameters instead of six
4623      * (as for default variant) and the only difference is the handling of
4624      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4625      * to a struct timespec where the generic variant uses fifth parameter.
4626      */
4627 #if defined(TARGET_S390X)
4628         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4629 #else
4630         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4631 #endif
4632         break;
4633 
4634     case IPCOP_semget:
4635         ret = get_errno(semget(first, second, third));
4636         break;
4637 
4638     case IPCOP_semctl: {
4639         /* The semun argument to semctl is passed by value, so dereference the
4640          * ptr argument. */
4641         abi_ulong atptr;
4642         get_user_ual(atptr, ptr);
4643         ret = do_semctl(first, second, third, atptr);
4644         break;
4645     }
4646 
4647     case IPCOP_msgget:
4648         ret = get_errno(msgget(first, second));
4649         break;
4650 
4651     case IPCOP_msgsnd:
4652         ret = do_msgsnd(first, ptr, second, third);
4653         break;
4654 
4655     case IPCOP_msgctl:
4656         ret = do_msgctl(first, second, ptr);
4657         break;
4658 
4659     case IPCOP_msgrcv:
4660         switch (version) {
4661         case 0:
4662             {
4663                 struct target_ipc_kludge {
4664                     abi_long msgp;
4665                     abi_long msgtyp;
4666                 } *tmp;
4667 
4668                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4669                     ret = -TARGET_EFAULT;
4670                     break;
4671                 }
4672 
4673                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4674 
4675                 unlock_user_struct(tmp, ptr, 0);
4676                 break;
4677             }
4678         default:
4679             ret = do_msgrcv(first, ptr, second, fifth, third);
4680         }
4681         break;
4682 
4683     case IPCOP_shmat:
4684         switch (version) {
4685         default:
4686         {
4687             abi_ulong raddr;
4688             raddr = do_shmat(cpu_env, first, ptr, second);
4689             if (is_error(raddr))
4690                 return get_errno(raddr);
4691             if (put_user_ual(raddr, third))
4692                 return -TARGET_EFAULT;
4693             break;
4694         }
4695         case 1:
4696             ret = -TARGET_EINVAL;
4697             break;
4698         }
4699 	break;
4700     case IPCOP_shmdt:
4701         ret = do_shmdt(ptr);
4702 	break;
4703 
4704     case IPCOP_shmget:
4705 	/* IPC_* flag values are the same on all linux platforms */
4706 	ret = get_errno(shmget(first, second, third));
4707 	break;
4708 
4709 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4710     case IPCOP_shmctl:
4711         ret = do_shmctl(first, second, ptr);
4712         break;
4713     default:
4714         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4715                       call, version);
4716 	ret = -TARGET_ENOSYS;
4717 	break;
4718     }
4719     return ret;
4720 }
4721 #endif
4722 
4723 /* kernel structure types definitions */
4724 
4725 #define STRUCT(name, ...) STRUCT_ ## name,
4726 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4727 enum {
4728 #include "syscall_types.h"
4729 STRUCT_MAX
4730 };
4731 #undef STRUCT
4732 #undef STRUCT_SPECIAL
4733 
4734 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4735 #define STRUCT_SPECIAL(name)
4736 #include "syscall_types.h"
4737 #undef STRUCT
4738 #undef STRUCT_SPECIAL
4739 
4740 #define MAX_STRUCT_SIZE 4096
4741 
4742 #ifdef CONFIG_FIEMAP
4743 /* So fiemap access checks don't overflow on 32 bit systems.
4744  * This is very slightly smaller than the limit imposed by
4745  * the underlying kernel.
4746  */
4747 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4748                             / sizeof(struct fiemap_extent))
4749 
4750 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4751                                        int fd, int cmd, abi_long arg)
4752 {
4753     /* The parameter for this ioctl is a struct fiemap followed
4754      * by an array of struct fiemap_extent whose size is set
4755      * in fiemap->fm_extent_count. The array is filled in by the
4756      * ioctl.
4757      */
4758     int target_size_in, target_size_out;
4759     struct fiemap *fm;
4760     const argtype *arg_type = ie->arg_type;
4761     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4762     void *argptr, *p;
4763     abi_long ret;
4764     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4765     uint32_t outbufsz;
4766     int free_fm = 0;
4767 
4768     assert(arg_type[0] == TYPE_PTR);
4769     assert(ie->access == IOC_RW);
4770     arg_type++;
4771     target_size_in = thunk_type_size(arg_type, 0);
4772     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4773     if (!argptr) {
4774         return -TARGET_EFAULT;
4775     }
4776     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4777     unlock_user(argptr, arg, 0);
4778     fm = (struct fiemap *)buf_temp;
4779     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4780         return -TARGET_EINVAL;
4781     }
4782 
4783     outbufsz = sizeof (*fm) +
4784         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4785 
4786     if (outbufsz > MAX_STRUCT_SIZE) {
4787         /* We can't fit all the extents into the fixed size buffer.
4788          * Allocate one that is large enough and use it instead.
4789          */
4790         fm = g_try_malloc(outbufsz);
4791         if (!fm) {
4792             return -TARGET_ENOMEM;
4793         }
4794         memcpy(fm, buf_temp, sizeof(struct fiemap));
4795         free_fm = 1;
4796     }
4797     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4798     if (!is_error(ret)) {
4799         target_size_out = target_size_in;
4800         /* An extent_count of 0 means we were only counting the extents
4801          * so there are no structs to copy
4802          */
4803         if (fm->fm_extent_count != 0) {
4804             target_size_out += fm->fm_mapped_extents * extent_size;
4805         }
4806         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4807         if (!argptr) {
4808             ret = -TARGET_EFAULT;
4809         } else {
4810             /* Convert the struct fiemap */
4811             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4812             if (fm->fm_extent_count != 0) {
4813                 p = argptr + target_size_in;
4814                 /* ...and then all the struct fiemap_extents */
4815                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4816                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4817                                   THUNK_TARGET);
4818                     p += extent_size;
4819                 }
4820             }
4821             unlock_user(argptr, arg, target_size_out);
4822         }
4823     }
4824     if (free_fm) {
4825         g_free(fm);
4826     }
4827     return ret;
4828 }
4829 #endif
4830 
4831 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4832                                 int fd, int cmd, abi_long arg)
4833 {
4834     const argtype *arg_type = ie->arg_type;
4835     int target_size;
4836     void *argptr;
4837     int ret;
4838     struct ifconf *host_ifconf;
4839     uint32_t outbufsz;
4840     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4841     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4842     int target_ifreq_size;
4843     int nb_ifreq;
4844     int free_buf = 0;
4845     int i;
4846     int target_ifc_len;
4847     abi_long target_ifc_buf;
4848     int host_ifc_len;
4849     char *host_ifc_buf;
4850 
4851     assert(arg_type[0] == TYPE_PTR);
4852     assert(ie->access == IOC_RW);
4853 
4854     arg_type++;
4855     target_size = thunk_type_size(arg_type, 0);
4856 
4857     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4858     if (!argptr)
4859         return -TARGET_EFAULT;
4860     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4861     unlock_user(argptr, arg, 0);
4862 
4863     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4864     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4865     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4866 
4867     if (target_ifc_buf != 0) {
4868         target_ifc_len = host_ifconf->ifc_len;
4869         nb_ifreq = target_ifc_len / target_ifreq_size;
4870         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4871 
4872         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4873         if (outbufsz > MAX_STRUCT_SIZE) {
4874             /*
4875              * We can't fit all the extents into the fixed size buffer.
4876              * Allocate one that is large enough and use it instead.
4877              */
4878             host_ifconf = g_try_malloc(outbufsz);
4879             if (!host_ifconf) {
4880                 return -TARGET_ENOMEM;
4881             }
4882             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4883             free_buf = 1;
4884         }
4885         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4886 
4887         host_ifconf->ifc_len = host_ifc_len;
4888     } else {
4889       host_ifc_buf = NULL;
4890     }
4891     host_ifconf->ifc_buf = host_ifc_buf;
4892 
4893     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4894     if (!is_error(ret)) {
4895 	/* convert host ifc_len to target ifc_len */
4896 
4897         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4898         target_ifc_len = nb_ifreq * target_ifreq_size;
4899         host_ifconf->ifc_len = target_ifc_len;
4900 
4901 	/* restore target ifc_buf */
4902 
4903         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4904 
4905 	/* copy struct ifconf to target user */
4906 
4907         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4908         if (!argptr)
4909             return -TARGET_EFAULT;
4910         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4911         unlock_user(argptr, arg, target_size);
4912 
4913         if (target_ifc_buf != 0) {
4914             /* copy ifreq[] to target user */
4915             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4916             for (i = 0; i < nb_ifreq ; i++) {
4917                 thunk_convert(argptr + i * target_ifreq_size,
4918                               host_ifc_buf + i * sizeof(struct ifreq),
4919                               ifreq_arg_type, THUNK_TARGET);
4920             }
4921             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4922         }
4923     }
4924 
4925     if (free_buf) {
4926         g_free(host_ifconf);
4927     }
4928 
4929     return ret;
4930 }
4931 
4932 #if defined(CONFIG_USBFS)
4933 #if HOST_LONG_BITS > 64
4934 #error USBDEVFS thunks do not support >64 bit hosts yet.
4935 #endif
4936 struct live_urb {
4937     uint64_t target_urb_adr;
4938     uint64_t target_buf_adr;
4939     char *target_buf_ptr;
4940     struct usbdevfs_urb host_urb;
4941 };
4942 
4943 static GHashTable *usbdevfs_urb_hashtable(void)
4944 {
4945     static GHashTable *urb_hashtable;
4946 
4947     if (!urb_hashtable) {
4948         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4949     }
4950     return urb_hashtable;
4951 }
4952 
4953 static void urb_hashtable_insert(struct live_urb *urb)
4954 {
4955     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4956     g_hash_table_insert(urb_hashtable, urb, urb);
4957 }
4958 
4959 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4960 {
4961     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4962     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4963 }
4964 
4965 static void urb_hashtable_remove(struct live_urb *urb)
4966 {
4967     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4968     g_hash_table_remove(urb_hashtable, urb);
4969 }
4970 
4971 static abi_long
4972 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4973                           int fd, int cmd, abi_long arg)
4974 {
4975     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4976     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4977     struct live_urb *lurb;
4978     void *argptr;
4979     uint64_t hurb;
4980     int target_size;
4981     uintptr_t target_urb_adr;
4982     abi_long ret;
4983 
4984     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4985 
4986     memset(buf_temp, 0, sizeof(uint64_t));
4987     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4988     if (is_error(ret)) {
4989         return ret;
4990     }
4991 
4992     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4993     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4994     if (!lurb->target_urb_adr) {
4995         return -TARGET_EFAULT;
4996     }
4997     urb_hashtable_remove(lurb);
4998     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4999         lurb->host_urb.buffer_length);
5000     lurb->target_buf_ptr = NULL;
5001 
5002     /* restore the guest buffer pointer */
5003     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5004 
5005     /* update the guest urb struct */
5006     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5007     if (!argptr) {
5008         g_free(lurb);
5009         return -TARGET_EFAULT;
5010     }
5011     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5012     unlock_user(argptr, lurb->target_urb_adr, target_size);
5013 
5014     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5015     /* write back the urb handle */
5016     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5017     if (!argptr) {
5018         g_free(lurb);
5019         return -TARGET_EFAULT;
5020     }
5021 
5022     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5023     target_urb_adr = lurb->target_urb_adr;
5024     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5025     unlock_user(argptr, arg, target_size);
5026 
5027     g_free(lurb);
5028     return ret;
5029 }
5030 
5031 static abi_long
5032 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5033                              uint8_t *buf_temp __attribute__((unused)),
5034                              int fd, int cmd, abi_long arg)
5035 {
5036     struct live_urb *lurb;
5037 
5038     /* map target address back to host URB with metadata. */
5039     lurb = urb_hashtable_lookup(arg);
5040     if (!lurb) {
5041         return -TARGET_EFAULT;
5042     }
5043     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5044 }
5045 
5046 static abi_long
5047 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5048                             int fd, int cmd, abi_long arg)
5049 {
5050     const argtype *arg_type = ie->arg_type;
5051     int target_size;
5052     abi_long ret;
5053     void *argptr;
5054     int rw_dir;
5055     struct live_urb *lurb;
5056 
5057     /*
5058      * each submitted URB needs to map to a unique ID for the
5059      * kernel, and that unique ID needs to be a pointer to
5060      * host memory.  hence, we need to malloc for each URB.
5061      * isochronous transfers have a variable length struct.
5062      */
5063     arg_type++;
5064     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5065 
5066     /* construct host copy of urb and metadata */
5067     lurb = g_try_new0(struct live_urb, 1);
5068     if (!lurb) {
5069         return -TARGET_ENOMEM;
5070     }
5071 
5072     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5073     if (!argptr) {
5074         g_free(lurb);
5075         return -TARGET_EFAULT;
5076     }
5077     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5078     unlock_user(argptr, arg, 0);
5079 
5080     lurb->target_urb_adr = arg;
5081     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5082 
5083     /* buffer space used depends on endpoint type so lock the entire buffer */
5084     /* control type urbs should check the buffer contents for true direction */
5085     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5086     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5087         lurb->host_urb.buffer_length, 1);
5088     if (lurb->target_buf_ptr == NULL) {
5089         g_free(lurb);
5090         return -TARGET_EFAULT;
5091     }
5092 
5093     /* update buffer pointer in host copy */
5094     lurb->host_urb.buffer = lurb->target_buf_ptr;
5095 
5096     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5097     if (is_error(ret)) {
5098         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5099         g_free(lurb);
5100     } else {
5101         urb_hashtable_insert(lurb);
5102     }
5103 
5104     return ret;
5105 }
5106 #endif /* CONFIG_USBFS */
5107 
5108 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5109                             int cmd, abi_long arg)
5110 {
5111     void *argptr;
5112     struct dm_ioctl *host_dm;
5113     abi_long guest_data;
5114     uint32_t guest_data_size;
5115     int target_size;
5116     const argtype *arg_type = ie->arg_type;
5117     abi_long ret;
5118     void *big_buf = NULL;
5119     char *host_data;
5120 
5121     arg_type++;
5122     target_size = thunk_type_size(arg_type, 0);
5123     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5124     if (!argptr) {
5125         ret = -TARGET_EFAULT;
5126         goto out;
5127     }
5128     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5129     unlock_user(argptr, arg, 0);
5130 
5131     /* buf_temp is too small, so fetch things into a bigger buffer */
5132     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5133     memcpy(big_buf, buf_temp, target_size);
5134     buf_temp = big_buf;
5135     host_dm = big_buf;
5136 
5137     guest_data = arg + host_dm->data_start;
5138     if ((guest_data - arg) < 0) {
5139         ret = -TARGET_EINVAL;
5140         goto out;
5141     }
5142     guest_data_size = host_dm->data_size - host_dm->data_start;
5143     host_data = (char*)host_dm + host_dm->data_start;
5144 
5145     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5146     if (!argptr) {
5147         ret = -TARGET_EFAULT;
5148         goto out;
5149     }
5150 
5151     switch (ie->host_cmd) {
5152     case DM_REMOVE_ALL:
5153     case DM_LIST_DEVICES:
5154     case DM_DEV_CREATE:
5155     case DM_DEV_REMOVE:
5156     case DM_DEV_SUSPEND:
5157     case DM_DEV_STATUS:
5158     case DM_DEV_WAIT:
5159     case DM_TABLE_STATUS:
5160     case DM_TABLE_CLEAR:
5161     case DM_TABLE_DEPS:
5162     case DM_LIST_VERSIONS:
5163         /* no input data */
5164         break;
5165     case DM_DEV_RENAME:
5166     case DM_DEV_SET_GEOMETRY:
5167         /* data contains only strings */
5168         memcpy(host_data, argptr, guest_data_size);
5169         break;
5170     case DM_TARGET_MSG:
5171         memcpy(host_data, argptr, guest_data_size);
5172         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5173         break;
5174     case DM_TABLE_LOAD:
5175     {
5176         void *gspec = argptr;
5177         void *cur_data = host_data;
5178         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5179         int spec_size = thunk_type_size(arg_type, 0);
5180         int i;
5181 
5182         for (i = 0; i < host_dm->target_count; i++) {
5183             struct dm_target_spec *spec = cur_data;
5184             uint32_t next;
5185             int slen;
5186 
5187             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5188             slen = strlen((char*)gspec + spec_size) + 1;
5189             next = spec->next;
5190             spec->next = sizeof(*spec) + slen;
5191             strcpy((char*)&spec[1], gspec + spec_size);
5192             gspec += next;
5193             cur_data += spec->next;
5194         }
5195         break;
5196     }
5197     default:
5198         ret = -TARGET_EINVAL;
5199         unlock_user(argptr, guest_data, 0);
5200         goto out;
5201     }
5202     unlock_user(argptr, guest_data, 0);
5203 
5204     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5205     if (!is_error(ret)) {
5206         guest_data = arg + host_dm->data_start;
5207         guest_data_size = host_dm->data_size - host_dm->data_start;
5208         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5209         switch (ie->host_cmd) {
5210         case DM_REMOVE_ALL:
5211         case DM_DEV_CREATE:
5212         case DM_DEV_REMOVE:
5213         case DM_DEV_RENAME:
5214         case DM_DEV_SUSPEND:
5215         case DM_DEV_STATUS:
5216         case DM_TABLE_LOAD:
5217         case DM_TABLE_CLEAR:
5218         case DM_TARGET_MSG:
5219         case DM_DEV_SET_GEOMETRY:
5220             /* no return data */
5221             break;
5222         case DM_LIST_DEVICES:
5223         {
5224             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5225             uint32_t remaining_data = guest_data_size;
5226             void *cur_data = argptr;
5227             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5228             int nl_size = 12; /* can't use thunk_size due to alignment */
5229 
5230             while (1) {
5231                 uint32_t next = nl->next;
5232                 if (next) {
5233                     nl->next = nl_size + (strlen(nl->name) + 1);
5234                 }
5235                 if (remaining_data < nl->next) {
5236                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5237                     break;
5238                 }
5239                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5240                 strcpy(cur_data + nl_size, nl->name);
5241                 cur_data += nl->next;
5242                 remaining_data -= nl->next;
5243                 if (!next) {
5244                     break;
5245                 }
5246                 nl = (void*)nl + next;
5247             }
5248             break;
5249         }
5250         case DM_DEV_WAIT:
5251         case DM_TABLE_STATUS:
5252         {
5253             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5254             void *cur_data = argptr;
5255             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5256             int spec_size = thunk_type_size(arg_type, 0);
5257             int i;
5258 
5259             for (i = 0; i < host_dm->target_count; i++) {
5260                 uint32_t next = spec->next;
5261                 int slen = strlen((char*)&spec[1]) + 1;
5262                 spec->next = (cur_data - argptr) + spec_size + slen;
5263                 if (guest_data_size < spec->next) {
5264                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5265                     break;
5266                 }
5267                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5268                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5269                 cur_data = argptr + spec->next;
5270                 spec = (void*)host_dm + host_dm->data_start + next;
5271             }
5272             break;
5273         }
5274         case DM_TABLE_DEPS:
5275         {
5276             void *hdata = (void*)host_dm + host_dm->data_start;
5277             int count = *(uint32_t*)hdata;
5278             uint64_t *hdev = hdata + 8;
5279             uint64_t *gdev = argptr + 8;
5280             int i;
5281 
5282             *(uint32_t*)argptr = tswap32(count);
5283             for (i = 0; i < count; i++) {
5284                 *gdev = tswap64(*hdev);
5285                 gdev++;
5286                 hdev++;
5287             }
5288             break;
5289         }
5290         case DM_LIST_VERSIONS:
5291         {
5292             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5293             uint32_t remaining_data = guest_data_size;
5294             void *cur_data = argptr;
5295             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5296             int vers_size = thunk_type_size(arg_type, 0);
5297 
5298             while (1) {
5299                 uint32_t next = vers->next;
5300                 if (next) {
5301                     vers->next = vers_size + (strlen(vers->name) + 1);
5302                 }
5303                 if (remaining_data < vers->next) {
5304                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5305                     break;
5306                 }
5307                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5308                 strcpy(cur_data + vers_size, vers->name);
5309                 cur_data += vers->next;
5310                 remaining_data -= vers->next;
5311                 if (!next) {
5312                     break;
5313                 }
5314                 vers = (void*)vers + next;
5315             }
5316             break;
5317         }
5318         default:
5319             unlock_user(argptr, guest_data, 0);
5320             ret = -TARGET_EINVAL;
5321             goto out;
5322         }
5323         unlock_user(argptr, guest_data, guest_data_size);
5324 
5325         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5326         if (!argptr) {
5327             ret = -TARGET_EFAULT;
5328             goto out;
5329         }
5330         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5331         unlock_user(argptr, arg, target_size);
5332     }
5333 out:
5334     g_free(big_buf);
5335     return ret;
5336 }
5337 
5338 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5339                                int cmd, abi_long arg)
5340 {
5341     void *argptr;
5342     int target_size;
5343     const argtype *arg_type = ie->arg_type;
5344     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5345     abi_long ret;
5346 
5347     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5348     struct blkpg_partition host_part;
5349 
5350     /* Read and convert blkpg */
5351     arg_type++;
5352     target_size = thunk_type_size(arg_type, 0);
5353     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5354     if (!argptr) {
5355         ret = -TARGET_EFAULT;
5356         goto out;
5357     }
5358     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5359     unlock_user(argptr, arg, 0);
5360 
5361     switch (host_blkpg->op) {
5362     case BLKPG_ADD_PARTITION:
5363     case BLKPG_DEL_PARTITION:
5364         /* payload is struct blkpg_partition */
5365         break;
5366     default:
5367         /* Unknown opcode */
5368         ret = -TARGET_EINVAL;
5369         goto out;
5370     }
5371 
5372     /* Read and convert blkpg->data */
5373     arg = (abi_long)(uintptr_t)host_blkpg->data;
5374     target_size = thunk_type_size(part_arg_type, 0);
5375     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5376     if (!argptr) {
5377         ret = -TARGET_EFAULT;
5378         goto out;
5379     }
5380     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5381     unlock_user(argptr, arg, 0);
5382 
5383     /* Swizzle the data pointer to our local copy and call! */
5384     host_blkpg->data = &host_part;
5385     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5386 
5387 out:
5388     return ret;
5389 }
5390 
5391 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5392                                 int fd, int cmd, abi_long arg)
5393 {
5394     const argtype *arg_type = ie->arg_type;
5395     const StructEntry *se;
5396     const argtype *field_types;
5397     const int *dst_offsets, *src_offsets;
5398     int target_size;
5399     void *argptr;
5400     abi_ulong *target_rt_dev_ptr = NULL;
5401     unsigned long *host_rt_dev_ptr = NULL;
5402     abi_long ret;
5403     int i;
5404 
5405     assert(ie->access == IOC_W);
5406     assert(*arg_type == TYPE_PTR);
5407     arg_type++;
5408     assert(*arg_type == TYPE_STRUCT);
5409     target_size = thunk_type_size(arg_type, 0);
5410     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5411     if (!argptr) {
5412         return -TARGET_EFAULT;
5413     }
5414     arg_type++;
5415     assert(*arg_type == (int)STRUCT_rtentry);
5416     se = struct_entries + *arg_type++;
5417     assert(se->convert[0] == NULL);
5418     /* convert struct here to be able to catch rt_dev string */
5419     field_types = se->field_types;
5420     dst_offsets = se->field_offsets[THUNK_HOST];
5421     src_offsets = se->field_offsets[THUNK_TARGET];
5422     for (i = 0; i < se->nb_fields; i++) {
5423         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5424             assert(*field_types == TYPE_PTRVOID);
5425             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5426             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5427             if (*target_rt_dev_ptr != 0) {
5428                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5429                                                   tswapal(*target_rt_dev_ptr));
5430                 if (!*host_rt_dev_ptr) {
5431                     unlock_user(argptr, arg, 0);
5432                     return -TARGET_EFAULT;
5433                 }
5434             } else {
5435                 *host_rt_dev_ptr = 0;
5436             }
5437             field_types++;
5438             continue;
5439         }
5440         field_types = thunk_convert(buf_temp + dst_offsets[i],
5441                                     argptr + src_offsets[i],
5442                                     field_types, THUNK_HOST);
5443     }
5444     unlock_user(argptr, arg, 0);
5445 
5446     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5447 
5448     assert(host_rt_dev_ptr != NULL);
5449     assert(target_rt_dev_ptr != NULL);
5450     if (*host_rt_dev_ptr != 0) {
5451         unlock_user((void *)*host_rt_dev_ptr,
5452                     *target_rt_dev_ptr, 0);
5453     }
5454     return ret;
5455 }
5456 
5457 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5458                                      int fd, int cmd, abi_long arg)
5459 {
5460     int sig = target_to_host_signal(arg);
5461     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5462 }
5463 
5464 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5465                                     int fd, int cmd, abi_long arg)
5466 {
5467     struct timeval tv;
5468     abi_long ret;
5469 
5470     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5471     if (is_error(ret)) {
5472         return ret;
5473     }
5474 
5475     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5476         if (copy_to_user_timeval(arg, &tv)) {
5477             return -TARGET_EFAULT;
5478         }
5479     } else {
5480         if (copy_to_user_timeval64(arg, &tv)) {
5481             return -TARGET_EFAULT;
5482         }
5483     }
5484 
5485     return ret;
5486 }
5487 
5488 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5489                                       int fd, int cmd, abi_long arg)
5490 {
5491     struct timespec ts;
5492     abi_long ret;
5493 
5494     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5495     if (is_error(ret)) {
5496         return ret;
5497     }
5498 
5499     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5500         if (host_to_target_timespec(arg, &ts)) {
5501             return -TARGET_EFAULT;
5502         }
5503     } else{
5504         if (host_to_target_timespec64(arg, &ts)) {
5505             return -TARGET_EFAULT;
5506         }
5507     }
5508 
5509     return ret;
5510 }
5511 
5512 #ifdef TIOCGPTPEER
5513 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5514                                      int fd, int cmd, abi_long arg)
5515 {
5516     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5517     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5518 }
5519 #endif
5520 
5521 #ifdef HAVE_DRM_H
5522 
5523 static void unlock_drm_version(struct drm_version *host_ver,
5524                                struct target_drm_version *target_ver,
5525                                bool copy)
5526 {
5527     unlock_user(host_ver->name, target_ver->name,
5528                                 copy ? host_ver->name_len : 0);
5529     unlock_user(host_ver->date, target_ver->date,
5530                                 copy ? host_ver->date_len : 0);
5531     unlock_user(host_ver->desc, target_ver->desc,
5532                                 copy ? host_ver->desc_len : 0);
5533 }
5534 
5535 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5536                                           struct target_drm_version *target_ver)
5537 {
5538     memset(host_ver, 0, sizeof(*host_ver));
5539 
5540     __get_user(host_ver->name_len, &target_ver->name_len);
5541     if (host_ver->name_len) {
5542         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5543                                    target_ver->name_len, 0);
5544         if (!host_ver->name) {
5545             return -EFAULT;
5546         }
5547     }
5548 
5549     __get_user(host_ver->date_len, &target_ver->date_len);
5550     if (host_ver->date_len) {
5551         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5552                                    target_ver->date_len, 0);
5553         if (!host_ver->date) {
5554             goto err;
5555         }
5556     }
5557 
5558     __get_user(host_ver->desc_len, &target_ver->desc_len);
5559     if (host_ver->desc_len) {
5560         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5561                                    target_ver->desc_len, 0);
5562         if (!host_ver->desc) {
5563             goto err;
5564         }
5565     }
5566 
5567     return 0;
5568 err:
5569     unlock_drm_version(host_ver, target_ver, false);
5570     return -EFAULT;
5571 }
5572 
5573 static inline void host_to_target_drmversion(
5574                                           struct target_drm_version *target_ver,
5575                                           struct drm_version *host_ver)
5576 {
5577     __put_user(host_ver->version_major, &target_ver->version_major);
5578     __put_user(host_ver->version_minor, &target_ver->version_minor);
5579     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5580     __put_user(host_ver->name_len, &target_ver->name_len);
5581     __put_user(host_ver->date_len, &target_ver->date_len);
5582     __put_user(host_ver->desc_len, &target_ver->desc_len);
5583     unlock_drm_version(host_ver, target_ver, true);
5584 }
5585 
5586 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5587                              int fd, int cmd, abi_long arg)
5588 {
5589     struct drm_version *ver;
5590     struct target_drm_version *target_ver;
5591     abi_long ret;
5592 
5593     switch (ie->host_cmd) {
5594     case DRM_IOCTL_VERSION:
5595         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5596             return -TARGET_EFAULT;
5597         }
5598         ver = (struct drm_version *)buf_temp;
5599         ret = target_to_host_drmversion(ver, target_ver);
5600         if (!is_error(ret)) {
5601             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5602             if (is_error(ret)) {
5603                 unlock_drm_version(ver, target_ver, false);
5604             } else {
5605                 host_to_target_drmversion(target_ver, ver);
5606             }
5607         }
5608         unlock_user_struct(target_ver, arg, 0);
5609         return ret;
5610     }
5611     return -TARGET_ENOSYS;
5612 }
5613 
5614 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5615                                            struct drm_i915_getparam *gparam,
5616                                            int fd, abi_long arg)
5617 {
5618     abi_long ret;
5619     int value;
5620     struct target_drm_i915_getparam *target_gparam;
5621 
5622     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5623         return -TARGET_EFAULT;
5624     }
5625 
5626     __get_user(gparam->param, &target_gparam->param);
5627     gparam->value = &value;
5628     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5629     put_user_s32(value, target_gparam->value);
5630 
5631     unlock_user_struct(target_gparam, arg, 0);
5632     return ret;
5633 }
5634 
5635 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5636                                   int fd, int cmd, abi_long arg)
5637 {
5638     switch (ie->host_cmd) {
5639     case DRM_IOCTL_I915_GETPARAM:
5640         return do_ioctl_drm_i915_getparam(ie,
5641                                           (struct drm_i915_getparam *)buf_temp,
5642                                           fd, arg);
5643     default:
5644         return -TARGET_ENOSYS;
5645     }
5646 }
5647 
5648 #endif
5649 
5650 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5651                                         int fd, int cmd, abi_long arg)
5652 {
5653     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5654     struct tun_filter *target_filter;
5655     char *target_addr;
5656 
5657     assert(ie->access == IOC_W);
5658 
5659     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5660     if (!target_filter) {
5661         return -TARGET_EFAULT;
5662     }
5663     filter->flags = tswap16(target_filter->flags);
5664     filter->count = tswap16(target_filter->count);
5665     unlock_user(target_filter, arg, 0);
5666 
5667     if (filter->count) {
5668         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5669             MAX_STRUCT_SIZE) {
5670             return -TARGET_EFAULT;
5671         }
5672 
5673         target_addr = lock_user(VERIFY_READ,
5674                                 arg + offsetof(struct tun_filter, addr),
5675                                 filter->count * ETH_ALEN, 1);
5676         if (!target_addr) {
5677             return -TARGET_EFAULT;
5678         }
5679         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5680         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5681     }
5682 
5683     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5684 }
5685 
5686 IOCTLEntry ioctl_entries[] = {
5687 #define IOCTL(cmd, access, ...) \
5688     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5689 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5690     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5691 #define IOCTL_IGNORE(cmd) \
5692     { TARGET_ ## cmd, 0, #cmd },
5693 #include "ioctls.h"
5694     { 0, 0, },
5695 };
5696 
5697 /* ??? Implement proper locking for ioctls.  */
5698 /* do_ioctl() Must return target values and target errnos. */
5699 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5700 {
5701     const IOCTLEntry *ie;
5702     const argtype *arg_type;
5703     abi_long ret;
5704     uint8_t buf_temp[MAX_STRUCT_SIZE];
5705     int target_size;
5706     void *argptr;
5707 
5708     ie = ioctl_entries;
5709     for(;;) {
5710         if (ie->target_cmd == 0) {
5711             qemu_log_mask(
5712                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5713             return -TARGET_ENOSYS;
5714         }
5715         if (ie->target_cmd == cmd)
5716             break;
5717         ie++;
5718     }
5719     arg_type = ie->arg_type;
5720     if (ie->do_ioctl) {
5721         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5722     } else if (!ie->host_cmd) {
5723         /* Some architectures define BSD ioctls in their headers
5724            that are not implemented in Linux.  */
5725         return -TARGET_ENOSYS;
5726     }
5727 
5728     switch(arg_type[0]) {
5729     case TYPE_NULL:
5730         /* no argument */
5731         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5732         break;
5733     case TYPE_PTRVOID:
5734     case TYPE_INT:
5735     case TYPE_LONG:
5736     case TYPE_ULONG:
5737         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5738         break;
5739     case TYPE_PTR:
5740         arg_type++;
5741         target_size = thunk_type_size(arg_type, 0);
5742         switch(ie->access) {
5743         case IOC_R:
5744             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5745             if (!is_error(ret)) {
5746                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5747                 if (!argptr)
5748                     return -TARGET_EFAULT;
5749                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5750                 unlock_user(argptr, arg, target_size);
5751             }
5752             break;
5753         case IOC_W:
5754             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5755             if (!argptr)
5756                 return -TARGET_EFAULT;
5757             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5758             unlock_user(argptr, arg, 0);
5759             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5760             break;
5761         default:
5762         case IOC_RW:
5763             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5764             if (!argptr)
5765                 return -TARGET_EFAULT;
5766             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5767             unlock_user(argptr, arg, 0);
5768             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5769             if (!is_error(ret)) {
5770                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5771                 if (!argptr)
5772                     return -TARGET_EFAULT;
5773                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5774                 unlock_user(argptr, arg, target_size);
5775             }
5776             break;
5777         }
5778         break;
5779     default:
5780         qemu_log_mask(LOG_UNIMP,
5781                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5782                       (long)cmd, arg_type[0]);
5783         ret = -TARGET_ENOSYS;
5784         break;
5785     }
5786     return ret;
5787 }
5788 
5789 static const bitmask_transtbl iflag_tbl[] = {
5790         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5791         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5792         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5793         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5794         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5795         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5796         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5797         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5798         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5799         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5800         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5801         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5802         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5803         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5804         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5805         { 0, 0, 0, 0 }
5806 };
5807 
5808 static const bitmask_transtbl oflag_tbl[] = {
5809 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5810 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5811 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5812 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5813 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5814 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5815 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5816 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5817 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5818 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5819 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5820 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5821 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5822 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5823 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5824 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5825 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5826 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5827 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5828 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5829 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5830 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5831 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5832 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5833 	{ 0, 0, 0, 0 }
5834 };
5835 
5836 static const bitmask_transtbl cflag_tbl[] = {
5837 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5838 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5839 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5840 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5841 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5842 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5843 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5844 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5845 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5846 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5847 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5848 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5849 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5850 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5851 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5852 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5853 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5854 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5855 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5856 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5857 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5858 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5859 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5860 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5861 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5862 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5863 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5864 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5865 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5866 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5867 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5868 	{ 0, 0, 0, 0 }
5869 };
5870 
5871 static const bitmask_transtbl lflag_tbl[] = {
5872   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5873   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5874   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5875   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5876   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5877   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5878   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5879   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5880   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5881   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5882   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5883   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5884   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5885   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5886   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5887   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5888   { 0, 0, 0, 0 }
5889 };
5890 
5891 static void target_to_host_termios (void *dst, const void *src)
5892 {
5893     struct host_termios *host = dst;
5894     const struct target_termios *target = src;
5895 
5896     host->c_iflag =
5897         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5898     host->c_oflag =
5899         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5900     host->c_cflag =
5901         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5902     host->c_lflag =
5903         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5904     host->c_line = target->c_line;
5905 
5906     memset(host->c_cc, 0, sizeof(host->c_cc));
5907     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5908     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5909     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5910     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5911     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5912     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5913     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5914     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5915     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5916     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5917     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5918     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5919     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5920     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5921     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5922     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5923     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5924 }
5925 
5926 static void host_to_target_termios (void *dst, const void *src)
5927 {
5928     struct target_termios *target = dst;
5929     const struct host_termios *host = src;
5930 
5931     target->c_iflag =
5932         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5933     target->c_oflag =
5934         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5935     target->c_cflag =
5936         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5937     target->c_lflag =
5938         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5939     target->c_line = host->c_line;
5940 
5941     memset(target->c_cc, 0, sizeof(target->c_cc));
5942     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5943     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5944     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5945     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5946     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5947     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5948     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5949     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5950     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5951     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5952     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5953     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5954     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5955     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5956     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5957     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5958     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5959 }
5960 
5961 static const StructEntry struct_termios_def = {
5962     .convert = { host_to_target_termios, target_to_host_termios },
5963     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5964     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5965     .print = print_termios,
5966 };
5967 
5968 static const bitmask_transtbl mmap_flags_tbl[] = {
5969     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5970     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5971     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5972     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5973       MAP_ANONYMOUS, MAP_ANONYMOUS },
5974     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5975       MAP_GROWSDOWN, MAP_GROWSDOWN },
5976     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5977       MAP_DENYWRITE, MAP_DENYWRITE },
5978     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5979       MAP_EXECUTABLE, MAP_EXECUTABLE },
5980     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5981     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5982       MAP_NORESERVE, MAP_NORESERVE },
5983     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5984     /* MAP_STACK had been ignored by the kernel for quite some time.
5985        Recognize it for the target insofar as we do not want to pass
5986        it through to the host.  */
5987     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5988     { 0, 0, 0, 0 }
5989 };
5990 
5991 /*
5992  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5993  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5994  */
5995 #if defined(TARGET_I386)
5996 
5997 /* NOTE: there is really one LDT for all the threads */
5998 static uint8_t *ldt_table;
5999 
6000 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6001 {
6002     int size;
6003     void *p;
6004 
6005     if (!ldt_table)
6006         return 0;
6007     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6008     if (size > bytecount)
6009         size = bytecount;
6010     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6011     if (!p)
6012         return -TARGET_EFAULT;
6013     /* ??? Should this by byteswapped?  */
6014     memcpy(p, ldt_table, size);
6015     unlock_user(p, ptr, size);
6016     return size;
6017 }
6018 
6019 /* XXX: add locking support */
6020 static abi_long write_ldt(CPUX86State *env,
6021                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6022 {
6023     struct target_modify_ldt_ldt_s ldt_info;
6024     struct target_modify_ldt_ldt_s *target_ldt_info;
6025     int seg_32bit, contents, read_exec_only, limit_in_pages;
6026     int seg_not_present, useable, lm;
6027     uint32_t *lp, entry_1, entry_2;
6028 
6029     if (bytecount != sizeof(ldt_info))
6030         return -TARGET_EINVAL;
6031     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6032         return -TARGET_EFAULT;
6033     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6034     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6035     ldt_info.limit = tswap32(target_ldt_info->limit);
6036     ldt_info.flags = tswap32(target_ldt_info->flags);
6037     unlock_user_struct(target_ldt_info, ptr, 0);
6038 
6039     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6040         return -TARGET_EINVAL;
6041     seg_32bit = ldt_info.flags & 1;
6042     contents = (ldt_info.flags >> 1) & 3;
6043     read_exec_only = (ldt_info.flags >> 3) & 1;
6044     limit_in_pages = (ldt_info.flags >> 4) & 1;
6045     seg_not_present = (ldt_info.flags >> 5) & 1;
6046     useable = (ldt_info.flags >> 6) & 1;
6047 #ifdef TARGET_ABI32
6048     lm = 0;
6049 #else
6050     lm = (ldt_info.flags >> 7) & 1;
6051 #endif
6052     if (contents == 3) {
6053         if (oldmode)
6054             return -TARGET_EINVAL;
6055         if (seg_not_present == 0)
6056             return -TARGET_EINVAL;
6057     }
6058     /* allocate the LDT */
6059     if (!ldt_table) {
6060         env->ldt.base = target_mmap(0,
6061                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6062                                     PROT_READ|PROT_WRITE,
6063                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6064         if (env->ldt.base == -1)
6065             return -TARGET_ENOMEM;
6066         memset(g2h_untagged(env->ldt.base), 0,
6067                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6068         env->ldt.limit = 0xffff;
6069         ldt_table = g2h_untagged(env->ldt.base);
6070     }
6071 
6072     /* NOTE: same code as Linux kernel */
6073     /* Allow LDTs to be cleared by the user. */
6074     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6075         if (oldmode ||
6076             (contents == 0		&&
6077              read_exec_only == 1	&&
6078              seg_32bit == 0		&&
6079              limit_in_pages == 0	&&
6080              seg_not_present == 1	&&
6081              useable == 0 )) {
6082             entry_1 = 0;
6083             entry_2 = 0;
6084             goto install;
6085         }
6086     }
6087 
6088     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6089         (ldt_info.limit & 0x0ffff);
6090     entry_2 = (ldt_info.base_addr & 0xff000000) |
6091         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6092         (ldt_info.limit & 0xf0000) |
6093         ((read_exec_only ^ 1) << 9) |
6094         (contents << 10) |
6095         ((seg_not_present ^ 1) << 15) |
6096         (seg_32bit << 22) |
6097         (limit_in_pages << 23) |
6098         (lm << 21) |
6099         0x7000;
6100     if (!oldmode)
6101         entry_2 |= (useable << 20);
6102 
6103     /* Install the new entry ...  */
6104 install:
6105     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6106     lp[0] = tswap32(entry_1);
6107     lp[1] = tswap32(entry_2);
6108     return 0;
6109 }
6110 
6111 /* specific and weird i386 syscalls */
6112 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6113                               unsigned long bytecount)
6114 {
6115     abi_long ret;
6116 
6117     switch (func) {
6118     case 0:
6119         ret = read_ldt(ptr, bytecount);
6120         break;
6121     case 1:
6122         ret = write_ldt(env, ptr, bytecount, 1);
6123         break;
6124     case 0x11:
6125         ret = write_ldt(env, ptr, bytecount, 0);
6126         break;
6127     default:
6128         ret = -TARGET_ENOSYS;
6129         break;
6130     }
6131     return ret;
6132 }
6133 
6134 #if defined(TARGET_ABI32)
6135 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6136 {
6137     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6138     struct target_modify_ldt_ldt_s ldt_info;
6139     struct target_modify_ldt_ldt_s *target_ldt_info;
6140     int seg_32bit, contents, read_exec_only, limit_in_pages;
6141     int seg_not_present, useable, lm;
6142     uint32_t *lp, entry_1, entry_2;
6143     int i;
6144 
6145     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6146     if (!target_ldt_info)
6147         return -TARGET_EFAULT;
6148     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6149     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6150     ldt_info.limit = tswap32(target_ldt_info->limit);
6151     ldt_info.flags = tswap32(target_ldt_info->flags);
6152     if (ldt_info.entry_number == -1) {
6153         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6154             if (gdt_table[i] == 0) {
6155                 ldt_info.entry_number = i;
6156                 target_ldt_info->entry_number = tswap32(i);
6157                 break;
6158             }
6159         }
6160     }
6161     unlock_user_struct(target_ldt_info, ptr, 1);
6162 
6163     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6164         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6165            return -TARGET_EINVAL;
6166     seg_32bit = ldt_info.flags & 1;
6167     contents = (ldt_info.flags >> 1) & 3;
6168     read_exec_only = (ldt_info.flags >> 3) & 1;
6169     limit_in_pages = (ldt_info.flags >> 4) & 1;
6170     seg_not_present = (ldt_info.flags >> 5) & 1;
6171     useable = (ldt_info.flags >> 6) & 1;
6172 #ifdef TARGET_ABI32
6173     lm = 0;
6174 #else
6175     lm = (ldt_info.flags >> 7) & 1;
6176 #endif
6177 
6178     if (contents == 3) {
6179         if (seg_not_present == 0)
6180             return -TARGET_EINVAL;
6181     }
6182 
6183     /* NOTE: same code as Linux kernel */
6184     /* Allow LDTs to be cleared by the user. */
6185     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6186         if ((contents == 0             &&
6187              read_exec_only == 1       &&
6188              seg_32bit == 0            &&
6189              limit_in_pages == 0       &&
6190              seg_not_present == 1      &&
6191              useable == 0 )) {
6192             entry_1 = 0;
6193             entry_2 = 0;
6194             goto install;
6195         }
6196     }
6197 
6198     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6199         (ldt_info.limit & 0x0ffff);
6200     entry_2 = (ldt_info.base_addr & 0xff000000) |
6201         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6202         (ldt_info.limit & 0xf0000) |
6203         ((read_exec_only ^ 1) << 9) |
6204         (contents << 10) |
6205         ((seg_not_present ^ 1) << 15) |
6206         (seg_32bit << 22) |
6207         (limit_in_pages << 23) |
6208         (useable << 20) |
6209         (lm << 21) |
6210         0x7000;
6211 
6212     /* Install the new entry ...  */
6213 install:
6214     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6215     lp[0] = tswap32(entry_1);
6216     lp[1] = tswap32(entry_2);
6217     return 0;
6218 }
6219 
6220 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6221 {
6222     struct target_modify_ldt_ldt_s *target_ldt_info;
6223     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6224     uint32_t base_addr, limit, flags;
6225     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6226     int seg_not_present, useable, lm;
6227     uint32_t *lp, entry_1, entry_2;
6228 
6229     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6230     if (!target_ldt_info)
6231         return -TARGET_EFAULT;
6232     idx = tswap32(target_ldt_info->entry_number);
6233     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6234         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6235         unlock_user_struct(target_ldt_info, ptr, 1);
6236         return -TARGET_EINVAL;
6237     }
6238     lp = (uint32_t *)(gdt_table + idx);
6239     entry_1 = tswap32(lp[0]);
6240     entry_2 = tswap32(lp[1]);
6241 
6242     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6243     contents = (entry_2 >> 10) & 3;
6244     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6245     seg_32bit = (entry_2 >> 22) & 1;
6246     limit_in_pages = (entry_2 >> 23) & 1;
6247     useable = (entry_2 >> 20) & 1;
6248 #ifdef TARGET_ABI32
6249     lm = 0;
6250 #else
6251     lm = (entry_2 >> 21) & 1;
6252 #endif
6253     flags = (seg_32bit << 0) | (contents << 1) |
6254         (read_exec_only << 3) | (limit_in_pages << 4) |
6255         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6256     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6257     base_addr = (entry_1 >> 16) |
6258         (entry_2 & 0xff000000) |
6259         ((entry_2 & 0xff) << 16);
6260     target_ldt_info->base_addr = tswapal(base_addr);
6261     target_ldt_info->limit = tswap32(limit);
6262     target_ldt_info->flags = tswap32(flags);
6263     unlock_user_struct(target_ldt_info, ptr, 1);
6264     return 0;
6265 }
6266 
6267 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6268 {
6269     return -TARGET_ENOSYS;
6270 }
6271 #else
6272 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6273 {
6274     abi_long ret = 0;
6275     abi_ulong val;
6276     int idx;
6277 
6278     switch(code) {
6279     case TARGET_ARCH_SET_GS:
6280     case TARGET_ARCH_SET_FS:
6281         if (code == TARGET_ARCH_SET_GS)
6282             idx = R_GS;
6283         else
6284             idx = R_FS;
6285         cpu_x86_load_seg(env, idx, 0);
6286         env->segs[idx].base = addr;
6287         break;
6288     case TARGET_ARCH_GET_GS:
6289     case TARGET_ARCH_GET_FS:
6290         if (code == TARGET_ARCH_GET_GS)
6291             idx = R_GS;
6292         else
6293             idx = R_FS;
6294         val = env->segs[idx].base;
6295         if (put_user(val, addr, abi_ulong))
6296             ret = -TARGET_EFAULT;
6297         break;
6298     default:
6299         ret = -TARGET_EINVAL;
6300         break;
6301     }
6302     return ret;
6303 }
6304 #endif /* defined(TARGET_ABI32 */
6305 #endif /* defined(TARGET_I386) */
6306 
6307 /*
6308  * These constants are generic.  Supply any that are missing from the host.
6309  */
6310 #ifndef PR_SET_NAME
6311 # define PR_SET_NAME    15
6312 # define PR_GET_NAME    16
6313 #endif
6314 #ifndef PR_SET_FP_MODE
6315 # define PR_SET_FP_MODE 45
6316 # define PR_GET_FP_MODE 46
6317 # define PR_FP_MODE_FR   (1 << 0)
6318 # define PR_FP_MODE_FRE  (1 << 1)
6319 #endif
6320 #ifndef PR_SVE_SET_VL
6321 # define PR_SVE_SET_VL  50
6322 # define PR_SVE_GET_VL  51
6323 # define PR_SVE_VL_LEN_MASK  0xffff
6324 # define PR_SVE_VL_INHERIT   (1 << 17)
6325 #endif
6326 #ifndef PR_PAC_RESET_KEYS
6327 # define PR_PAC_RESET_KEYS  54
6328 # define PR_PAC_APIAKEY   (1 << 0)
6329 # define PR_PAC_APIBKEY   (1 << 1)
6330 # define PR_PAC_APDAKEY   (1 << 2)
6331 # define PR_PAC_APDBKEY   (1 << 3)
6332 # define PR_PAC_APGAKEY   (1 << 4)
6333 #endif
6334 #ifndef PR_SET_TAGGED_ADDR_CTRL
6335 # define PR_SET_TAGGED_ADDR_CTRL 55
6336 # define PR_GET_TAGGED_ADDR_CTRL 56
6337 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6338 #endif
6339 #ifndef PR_MTE_TCF_SHIFT
6340 # define PR_MTE_TCF_SHIFT       1
6341 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6342 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6343 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6344 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6345 # define PR_MTE_TAG_SHIFT       3
6346 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6347 #endif
6348 #ifndef PR_SET_IO_FLUSHER
6349 # define PR_SET_IO_FLUSHER 57
6350 # define PR_GET_IO_FLUSHER 58
6351 #endif
6352 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6353 # define PR_SET_SYSCALL_USER_DISPATCH 59
6354 #endif
6355 #ifndef PR_SME_SET_VL
6356 # define PR_SME_SET_VL  63
6357 # define PR_SME_GET_VL  64
6358 # define PR_SME_VL_LEN_MASK  0xffff
6359 # define PR_SME_VL_INHERIT   (1 << 17)
6360 #endif
6361 
6362 #include "target_prctl.h"
6363 
6364 static abi_long do_prctl_inval0(CPUArchState *env)
6365 {
6366     return -TARGET_EINVAL;
6367 }
6368 
6369 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6370 {
6371     return -TARGET_EINVAL;
6372 }
6373 
6374 #ifndef do_prctl_get_fp_mode
6375 #define do_prctl_get_fp_mode do_prctl_inval0
6376 #endif
6377 #ifndef do_prctl_set_fp_mode
6378 #define do_prctl_set_fp_mode do_prctl_inval1
6379 #endif
6380 #ifndef do_prctl_sve_get_vl
6381 #define do_prctl_sve_get_vl do_prctl_inval0
6382 #endif
6383 #ifndef do_prctl_sve_set_vl
6384 #define do_prctl_sve_set_vl do_prctl_inval1
6385 #endif
6386 #ifndef do_prctl_reset_keys
6387 #define do_prctl_reset_keys do_prctl_inval1
6388 #endif
6389 #ifndef do_prctl_set_tagged_addr_ctrl
6390 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6391 #endif
6392 #ifndef do_prctl_get_tagged_addr_ctrl
6393 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6394 #endif
6395 #ifndef do_prctl_get_unalign
6396 #define do_prctl_get_unalign do_prctl_inval1
6397 #endif
6398 #ifndef do_prctl_set_unalign
6399 #define do_prctl_set_unalign do_prctl_inval1
6400 #endif
6401 #ifndef do_prctl_sme_get_vl
6402 #define do_prctl_sme_get_vl do_prctl_inval0
6403 #endif
6404 #ifndef do_prctl_sme_set_vl
6405 #define do_prctl_sme_set_vl do_prctl_inval1
6406 #endif
6407 
6408 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6409                          abi_long arg3, abi_long arg4, abi_long arg5)
6410 {
6411     abi_long ret;
6412 
6413     switch (option) {
6414     case PR_GET_PDEATHSIG:
6415         {
6416             int deathsig;
6417             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6418                                   arg3, arg4, arg5));
6419             if (!is_error(ret) &&
6420                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6421                 return -TARGET_EFAULT;
6422             }
6423             return ret;
6424         }
6425     case PR_SET_PDEATHSIG:
6426         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6427                                arg3, arg4, arg5));
6428     case PR_GET_NAME:
6429         {
6430             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6431             if (!name) {
6432                 return -TARGET_EFAULT;
6433             }
6434             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6435                                   arg3, arg4, arg5));
6436             unlock_user(name, arg2, 16);
6437             return ret;
6438         }
6439     case PR_SET_NAME:
6440         {
6441             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6442             if (!name) {
6443                 return -TARGET_EFAULT;
6444             }
6445             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6446                                   arg3, arg4, arg5));
6447             unlock_user(name, arg2, 0);
6448             return ret;
6449         }
6450     case PR_GET_FP_MODE:
6451         return do_prctl_get_fp_mode(env);
6452     case PR_SET_FP_MODE:
6453         return do_prctl_set_fp_mode(env, arg2);
6454     case PR_SVE_GET_VL:
6455         return do_prctl_sve_get_vl(env);
6456     case PR_SVE_SET_VL:
6457         return do_prctl_sve_set_vl(env, arg2);
6458     case PR_SME_GET_VL:
6459         return do_prctl_sme_get_vl(env);
6460     case PR_SME_SET_VL:
6461         return do_prctl_sme_set_vl(env, arg2);
6462     case PR_PAC_RESET_KEYS:
6463         if (arg3 || arg4 || arg5) {
6464             return -TARGET_EINVAL;
6465         }
6466         return do_prctl_reset_keys(env, arg2);
6467     case PR_SET_TAGGED_ADDR_CTRL:
6468         if (arg3 || arg4 || arg5) {
6469             return -TARGET_EINVAL;
6470         }
6471         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6472     case PR_GET_TAGGED_ADDR_CTRL:
6473         if (arg2 || arg3 || arg4 || arg5) {
6474             return -TARGET_EINVAL;
6475         }
6476         return do_prctl_get_tagged_addr_ctrl(env);
6477 
6478     case PR_GET_UNALIGN:
6479         return do_prctl_get_unalign(env, arg2);
6480     case PR_SET_UNALIGN:
6481         return do_prctl_set_unalign(env, arg2);
6482 
6483     case PR_CAP_AMBIENT:
6484     case PR_CAPBSET_READ:
6485     case PR_CAPBSET_DROP:
6486     case PR_GET_DUMPABLE:
6487     case PR_SET_DUMPABLE:
6488     case PR_GET_KEEPCAPS:
6489     case PR_SET_KEEPCAPS:
6490     case PR_GET_SECUREBITS:
6491     case PR_SET_SECUREBITS:
6492     case PR_GET_TIMING:
6493     case PR_SET_TIMING:
6494     case PR_GET_TIMERSLACK:
6495     case PR_SET_TIMERSLACK:
6496     case PR_MCE_KILL:
6497     case PR_MCE_KILL_GET:
6498     case PR_GET_NO_NEW_PRIVS:
6499     case PR_SET_NO_NEW_PRIVS:
6500     case PR_GET_IO_FLUSHER:
6501     case PR_SET_IO_FLUSHER:
6502         /* Some prctl options have no pointer arguments and we can pass on. */
6503         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6504 
6505     case PR_GET_CHILD_SUBREAPER:
6506     case PR_SET_CHILD_SUBREAPER:
6507     case PR_GET_SPECULATION_CTRL:
6508     case PR_SET_SPECULATION_CTRL:
6509     case PR_GET_TID_ADDRESS:
6510         /* TODO */
6511         return -TARGET_EINVAL;
6512 
6513     case PR_GET_FPEXC:
6514     case PR_SET_FPEXC:
6515         /* Was used for SPE on PowerPC. */
6516         return -TARGET_EINVAL;
6517 
6518     case PR_GET_ENDIAN:
6519     case PR_SET_ENDIAN:
6520     case PR_GET_FPEMU:
6521     case PR_SET_FPEMU:
6522     case PR_SET_MM:
6523     case PR_GET_SECCOMP:
6524     case PR_SET_SECCOMP:
6525     case PR_SET_SYSCALL_USER_DISPATCH:
6526     case PR_GET_THP_DISABLE:
6527     case PR_SET_THP_DISABLE:
6528     case PR_GET_TSC:
6529     case PR_SET_TSC:
6530         /* Disable to prevent the target disabling stuff we need. */
6531         return -TARGET_EINVAL;
6532 
6533     default:
6534         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6535                       option);
6536         return -TARGET_EINVAL;
6537     }
6538 }
6539 
6540 #define NEW_STACK_SIZE 0x40000
6541 
6542 
6543 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6544 typedef struct {
6545     CPUArchState *env;
6546     pthread_mutex_t mutex;
6547     pthread_cond_t cond;
6548     pthread_t thread;
6549     uint32_t tid;
6550     abi_ulong child_tidptr;
6551     abi_ulong parent_tidptr;
6552     sigset_t sigmask;
6553 } new_thread_info;
6554 
6555 static void *clone_func(void *arg)
6556 {
6557     new_thread_info *info = arg;
6558     CPUArchState *env;
6559     CPUState *cpu;
6560     TaskState *ts;
6561 
6562     rcu_register_thread();
6563     tcg_register_thread();
6564     env = info->env;
6565     cpu = env_cpu(env);
6566     thread_cpu = cpu;
6567     ts = (TaskState *)cpu->opaque;
6568     info->tid = sys_gettid();
6569     task_settid(ts);
6570     if (info->child_tidptr)
6571         put_user_u32(info->tid, info->child_tidptr);
6572     if (info->parent_tidptr)
6573         put_user_u32(info->tid, info->parent_tidptr);
6574     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6575     /* Enable signals.  */
6576     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6577     /* Signal to the parent that we're ready.  */
6578     pthread_mutex_lock(&info->mutex);
6579     pthread_cond_broadcast(&info->cond);
6580     pthread_mutex_unlock(&info->mutex);
6581     /* Wait until the parent has finished initializing the tls state.  */
6582     pthread_mutex_lock(&clone_lock);
6583     pthread_mutex_unlock(&clone_lock);
6584     cpu_loop(env);
6585     /* never exits */
6586     return NULL;
6587 }
6588 
6589 /* do_fork() Must return host values and target errnos (unlike most
6590    do_*() functions). */
6591 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6592                    abi_ulong parent_tidptr, target_ulong newtls,
6593                    abi_ulong child_tidptr)
6594 {
6595     CPUState *cpu = env_cpu(env);
6596     int ret;
6597     TaskState *ts;
6598     CPUState *new_cpu;
6599     CPUArchState *new_env;
6600     sigset_t sigmask;
6601 
6602     flags &= ~CLONE_IGNORED_FLAGS;
6603 
6604     /* Emulate vfork() with fork() */
6605     if (flags & CLONE_VFORK)
6606         flags &= ~(CLONE_VFORK | CLONE_VM);
6607 
6608     if (flags & CLONE_VM) {
6609         TaskState *parent_ts = (TaskState *)cpu->opaque;
6610         new_thread_info info;
6611         pthread_attr_t attr;
6612 
6613         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6614             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6615             return -TARGET_EINVAL;
6616         }
6617 
6618         ts = g_new0(TaskState, 1);
6619         init_task_state(ts);
6620 
6621         /* Grab a mutex so that thread setup appears atomic.  */
6622         pthread_mutex_lock(&clone_lock);
6623 
6624         /*
6625          * If this is our first additional thread, we need to ensure we
6626          * generate code for parallel execution and flush old translations.
6627          * Do this now so that the copy gets CF_PARALLEL too.
6628          */
6629         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6630             cpu->tcg_cflags |= CF_PARALLEL;
6631             tb_flush(cpu);
6632         }
6633 
6634         /* we create a new CPU instance. */
6635         new_env = cpu_copy(env);
6636         /* Init regs that differ from the parent.  */
6637         cpu_clone_regs_child(new_env, newsp, flags);
6638         cpu_clone_regs_parent(env, flags);
6639         new_cpu = env_cpu(new_env);
6640         new_cpu->opaque = ts;
6641         ts->bprm = parent_ts->bprm;
6642         ts->info = parent_ts->info;
6643         ts->signal_mask = parent_ts->signal_mask;
6644 
6645         if (flags & CLONE_CHILD_CLEARTID) {
6646             ts->child_tidptr = child_tidptr;
6647         }
6648 
6649         if (flags & CLONE_SETTLS) {
6650             cpu_set_tls (new_env, newtls);
6651         }
6652 
6653         memset(&info, 0, sizeof(info));
6654         pthread_mutex_init(&info.mutex, NULL);
6655         pthread_mutex_lock(&info.mutex);
6656         pthread_cond_init(&info.cond, NULL);
6657         info.env = new_env;
6658         if (flags & CLONE_CHILD_SETTID) {
6659             info.child_tidptr = child_tidptr;
6660         }
6661         if (flags & CLONE_PARENT_SETTID) {
6662             info.parent_tidptr = parent_tidptr;
6663         }
6664 
6665         ret = pthread_attr_init(&attr);
6666         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6667         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6668         /* It is not safe to deliver signals until the child has finished
6669            initializing, so temporarily block all signals.  */
6670         sigfillset(&sigmask);
6671         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6672         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6673 
6674         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6675         /* TODO: Free new CPU state if thread creation failed.  */
6676 
6677         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6678         pthread_attr_destroy(&attr);
6679         if (ret == 0) {
6680             /* Wait for the child to initialize.  */
6681             pthread_cond_wait(&info.cond, &info.mutex);
6682             ret = info.tid;
6683         } else {
6684             ret = -1;
6685         }
6686         pthread_mutex_unlock(&info.mutex);
6687         pthread_cond_destroy(&info.cond);
6688         pthread_mutex_destroy(&info.mutex);
6689         pthread_mutex_unlock(&clone_lock);
6690     } else {
6691         /* if no CLONE_VM, we consider it is a fork */
6692         if (flags & CLONE_INVALID_FORK_FLAGS) {
6693             return -TARGET_EINVAL;
6694         }
6695 
6696         /* We can't support custom termination signals */
6697         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6698             return -TARGET_EINVAL;
6699         }
6700 
6701         if (block_signals()) {
6702             return -QEMU_ERESTARTSYS;
6703         }
6704 
6705         fork_start();
6706         ret = fork();
6707         if (ret == 0) {
6708             /* Child Process.  */
6709             cpu_clone_regs_child(env, newsp, flags);
6710             fork_end(1);
6711             /* There is a race condition here.  The parent process could
6712                theoretically read the TID in the child process before the child
6713                tid is set.  This would require using either ptrace
6714                (not implemented) or having *_tidptr to point at a shared memory
6715                mapping.  We can't repeat the spinlock hack used above because
6716                the child process gets its own copy of the lock.  */
6717             if (flags & CLONE_CHILD_SETTID)
6718                 put_user_u32(sys_gettid(), child_tidptr);
6719             if (flags & CLONE_PARENT_SETTID)
6720                 put_user_u32(sys_gettid(), parent_tidptr);
6721             ts = (TaskState *)cpu->opaque;
6722             if (flags & CLONE_SETTLS)
6723                 cpu_set_tls (env, newtls);
6724             if (flags & CLONE_CHILD_CLEARTID)
6725                 ts->child_tidptr = child_tidptr;
6726         } else {
6727             cpu_clone_regs_parent(env, flags);
6728             fork_end(0);
6729         }
6730     }
6731     return ret;
6732 }
6733 
6734 /* warning : doesn't handle linux specific flags... */
6735 static int target_to_host_fcntl_cmd(int cmd)
6736 {
6737     int ret;
6738 
6739     switch(cmd) {
6740     case TARGET_F_DUPFD:
6741     case TARGET_F_GETFD:
6742     case TARGET_F_SETFD:
6743     case TARGET_F_GETFL:
6744     case TARGET_F_SETFL:
6745     case TARGET_F_OFD_GETLK:
6746     case TARGET_F_OFD_SETLK:
6747     case TARGET_F_OFD_SETLKW:
6748         ret = cmd;
6749         break;
6750     case TARGET_F_GETLK:
6751         ret = F_GETLK64;
6752         break;
6753     case TARGET_F_SETLK:
6754         ret = F_SETLK64;
6755         break;
6756     case TARGET_F_SETLKW:
6757         ret = F_SETLKW64;
6758         break;
6759     case TARGET_F_GETOWN:
6760         ret = F_GETOWN;
6761         break;
6762     case TARGET_F_SETOWN:
6763         ret = F_SETOWN;
6764         break;
6765     case TARGET_F_GETSIG:
6766         ret = F_GETSIG;
6767         break;
6768     case TARGET_F_SETSIG:
6769         ret = F_SETSIG;
6770         break;
6771 #if TARGET_ABI_BITS == 32
6772     case TARGET_F_GETLK64:
6773         ret = F_GETLK64;
6774         break;
6775     case TARGET_F_SETLK64:
6776         ret = F_SETLK64;
6777         break;
6778     case TARGET_F_SETLKW64:
6779         ret = F_SETLKW64;
6780         break;
6781 #endif
6782     case TARGET_F_SETLEASE:
6783         ret = F_SETLEASE;
6784         break;
6785     case TARGET_F_GETLEASE:
6786         ret = F_GETLEASE;
6787         break;
6788 #ifdef F_DUPFD_CLOEXEC
6789     case TARGET_F_DUPFD_CLOEXEC:
6790         ret = F_DUPFD_CLOEXEC;
6791         break;
6792 #endif
6793     case TARGET_F_NOTIFY:
6794         ret = F_NOTIFY;
6795         break;
6796 #ifdef F_GETOWN_EX
6797     case TARGET_F_GETOWN_EX:
6798         ret = F_GETOWN_EX;
6799         break;
6800 #endif
6801 #ifdef F_SETOWN_EX
6802     case TARGET_F_SETOWN_EX:
6803         ret = F_SETOWN_EX;
6804         break;
6805 #endif
6806 #ifdef F_SETPIPE_SZ
6807     case TARGET_F_SETPIPE_SZ:
6808         ret = F_SETPIPE_SZ;
6809         break;
6810     case TARGET_F_GETPIPE_SZ:
6811         ret = F_GETPIPE_SZ;
6812         break;
6813 #endif
6814 #ifdef F_ADD_SEALS
6815     case TARGET_F_ADD_SEALS:
6816         ret = F_ADD_SEALS;
6817         break;
6818     case TARGET_F_GET_SEALS:
6819         ret = F_GET_SEALS;
6820         break;
6821 #endif
6822     default:
6823         ret = -TARGET_EINVAL;
6824         break;
6825     }
6826 
6827 #if defined(__powerpc64__)
6828     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6829      * is not supported by kernel. The glibc fcntl call actually adjusts
6830      * them to 5, 6 and 7 before making the syscall(). Since we make the
6831      * syscall directly, adjust to what is supported by the kernel.
6832      */
6833     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6834         ret -= F_GETLK64 - 5;
6835     }
6836 #endif
6837 
6838     return ret;
6839 }
6840 
6841 #define FLOCK_TRANSTBL \
6842     switch (type) { \
6843     TRANSTBL_CONVERT(F_RDLCK); \
6844     TRANSTBL_CONVERT(F_WRLCK); \
6845     TRANSTBL_CONVERT(F_UNLCK); \
6846     }
6847 
6848 static int target_to_host_flock(int type)
6849 {
6850 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6851     FLOCK_TRANSTBL
6852 #undef  TRANSTBL_CONVERT
6853     return -TARGET_EINVAL;
6854 }
6855 
6856 static int host_to_target_flock(int type)
6857 {
6858 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6859     FLOCK_TRANSTBL
6860 #undef  TRANSTBL_CONVERT
6861     /* if we don't know how to convert the value coming
6862      * from the host we copy to the target field as-is
6863      */
6864     return type;
6865 }
6866 
6867 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6868                                             abi_ulong target_flock_addr)
6869 {
6870     struct target_flock *target_fl;
6871     int l_type;
6872 
6873     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6874         return -TARGET_EFAULT;
6875     }
6876 
6877     __get_user(l_type, &target_fl->l_type);
6878     l_type = target_to_host_flock(l_type);
6879     if (l_type < 0) {
6880         return l_type;
6881     }
6882     fl->l_type = l_type;
6883     __get_user(fl->l_whence, &target_fl->l_whence);
6884     __get_user(fl->l_start, &target_fl->l_start);
6885     __get_user(fl->l_len, &target_fl->l_len);
6886     __get_user(fl->l_pid, &target_fl->l_pid);
6887     unlock_user_struct(target_fl, target_flock_addr, 0);
6888     return 0;
6889 }
6890 
6891 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6892                                           const struct flock64 *fl)
6893 {
6894     struct target_flock *target_fl;
6895     short l_type;
6896 
6897     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6898         return -TARGET_EFAULT;
6899     }
6900 
6901     l_type = host_to_target_flock(fl->l_type);
6902     __put_user(l_type, &target_fl->l_type);
6903     __put_user(fl->l_whence, &target_fl->l_whence);
6904     __put_user(fl->l_start, &target_fl->l_start);
6905     __put_user(fl->l_len, &target_fl->l_len);
6906     __put_user(fl->l_pid, &target_fl->l_pid);
6907     unlock_user_struct(target_fl, target_flock_addr, 1);
6908     return 0;
6909 }
6910 
6911 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6912 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6913 
6914 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6915 struct target_oabi_flock64 {
6916     abi_short l_type;
6917     abi_short l_whence;
6918     abi_llong l_start;
6919     abi_llong l_len;
6920     abi_int   l_pid;
6921 } QEMU_PACKED;
6922 
6923 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6924                                                    abi_ulong target_flock_addr)
6925 {
6926     struct target_oabi_flock64 *target_fl;
6927     int l_type;
6928 
6929     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6930         return -TARGET_EFAULT;
6931     }
6932 
6933     __get_user(l_type, &target_fl->l_type);
6934     l_type = target_to_host_flock(l_type);
6935     if (l_type < 0) {
6936         return l_type;
6937     }
6938     fl->l_type = l_type;
6939     __get_user(fl->l_whence, &target_fl->l_whence);
6940     __get_user(fl->l_start, &target_fl->l_start);
6941     __get_user(fl->l_len, &target_fl->l_len);
6942     __get_user(fl->l_pid, &target_fl->l_pid);
6943     unlock_user_struct(target_fl, target_flock_addr, 0);
6944     return 0;
6945 }
6946 
6947 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6948                                                  const struct flock64 *fl)
6949 {
6950     struct target_oabi_flock64 *target_fl;
6951     short l_type;
6952 
6953     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6954         return -TARGET_EFAULT;
6955     }
6956 
6957     l_type = host_to_target_flock(fl->l_type);
6958     __put_user(l_type, &target_fl->l_type);
6959     __put_user(fl->l_whence, &target_fl->l_whence);
6960     __put_user(fl->l_start, &target_fl->l_start);
6961     __put_user(fl->l_len, &target_fl->l_len);
6962     __put_user(fl->l_pid, &target_fl->l_pid);
6963     unlock_user_struct(target_fl, target_flock_addr, 1);
6964     return 0;
6965 }
6966 #endif
6967 
6968 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6969                                               abi_ulong target_flock_addr)
6970 {
6971     struct target_flock64 *target_fl;
6972     int l_type;
6973 
6974     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6975         return -TARGET_EFAULT;
6976     }
6977 
6978     __get_user(l_type, &target_fl->l_type);
6979     l_type = target_to_host_flock(l_type);
6980     if (l_type < 0) {
6981         return l_type;
6982     }
6983     fl->l_type = l_type;
6984     __get_user(fl->l_whence, &target_fl->l_whence);
6985     __get_user(fl->l_start, &target_fl->l_start);
6986     __get_user(fl->l_len, &target_fl->l_len);
6987     __get_user(fl->l_pid, &target_fl->l_pid);
6988     unlock_user_struct(target_fl, target_flock_addr, 0);
6989     return 0;
6990 }
6991 
6992 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6993                                             const struct flock64 *fl)
6994 {
6995     struct target_flock64 *target_fl;
6996     short l_type;
6997 
6998     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6999         return -TARGET_EFAULT;
7000     }
7001 
7002     l_type = host_to_target_flock(fl->l_type);
7003     __put_user(l_type, &target_fl->l_type);
7004     __put_user(fl->l_whence, &target_fl->l_whence);
7005     __put_user(fl->l_start, &target_fl->l_start);
7006     __put_user(fl->l_len, &target_fl->l_len);
7007     __put_user(fl->l_pid, &target_fl->l_pid);
7008     unlock_user_struct(target_fl, target_flock_addr, 1);
7009     return 0;
7010 }
7011 
7012 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7013 {
7014     struct flock64 fl64;
7015 #ifdef F_GETOWN_EX
7016     struct f_owner_ex fox;
7017     struct target_f_owner_ex *target_fox;
7018 #endif
7019     abi_long ret;
7020     int host_cmd = target_to_host_fcntl_cmd(cmd);
7021 
7022     if (host_cmd == -TARGET_EINVAL)
7023 	    return host_cmd;
7024 
7025     switch(cmd) {
7026     case TARGET_F_GETLK:
7027         ret = copy_from_user_flock(&fl64, arg);
7028         if (ret) {
7029             return ret;
7030         }
7031         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7032         if (ret == 0) {
7033             ret = copy_to_user_flock(arg, &fl64);
7034         }
7035         break;
7036 
7037     case TARGET_F_SETLK:
7038     case TARGET_F_SETLKW:
7039         ret = copy_from_user_flock(&fl64, arg);
7040         if (ret) {
7041             return ret;
7042         }
7043         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7044         break;
7045 
7046     case TARGET_F_GETLK64:
7047     case TARGET_F_OFD_GETLK:
7048         ret = copy_from_user_flock64(&fl64, arg);
7049         if (ret) {
7050             return ret;
7051         }
7052         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7053         if (ret == 0) {
7054             ret = copy_to_user_flock64(arg, &fl64);
7055         }
7056         break;
7057     case TARGET_F_SETLK64:
7058     case TARGET_F_SETLKW64:
7059     case TARGET_F_OFD_SETLK:
7060     case TARGET_F_OFD_SETLKW:
7061         ret = copy_from_user_flock64(&fl64, arg);
7062         if (ret) {
7063             return ret;
7064         }
7065         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7066         break;
7067 
7068     case TARGET_F_GETFL:
7069         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7070         if (ret >= 0) {
7071             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7072         }
7073         break;
7074 
7075     case TARGET_F_SETFL:
7076         ret = get_errno(safe_fcntl(fd, host_cmd,
7077                                    target_to_host_bitmask(arg,
7078                                                           fcntl_flags_tbl)));
7079         break;
7080 
7081 #ifdef F_GETOWN_EX
7082     case TARGET_F_GETOWN_EX:
7083         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7084         if (ret >= 0) {
7085             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7086                 return -TARGET_EFAULT;
7087             target_fox->type = tswap32(fox.type);
7088             target_fox->pid = tswap32(fox.pid);
7089             unlock_user_struct(target_fox, arg, 1);
7090         }
7091         break;
7092 #endif
7093 
7094 #ifdef F_SETOWN_EX
7095     case TARGET_F_SETOWN_EX:
7096         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7097             return -TARGET_EFAULT;
7098         fox.type = tswap32(target_fox->type);
7099         fox.pid = tswap32(target_fox->pid);
7100         unlock_user_struct(target_fox, arg, 0);
7101         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7102         break;
7103 #endif
7104 
7105     case TARGET_F_SETSIG:
7106         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7107         break;
7108 
7109     case TARGET_F_GETSIG:
7110         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7111         break;
7112 
7113     case TARGET_F_SETOWN:
7114     case TARGET_F_GETOWN:
7115     case TARGET_F_SETLEASE:
7116     case TARGET_F_GETLEASE:
7117     case TARGET_F_SETPIPE_SZ:
7118     case TARGET_F_GETPIPE_SZ:
7119     case TARGET_F_ADD_SEALS:
7120     case TARGET_F_GET_SEALS:
7121         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7122         break;
7123 
7124     default:
7125         ret = get_errno(safe_fcntl(fd, cmd, arg));
7126         break;
7127     }
7128     return ret;
7129 }
7130 
7131 #ifdef USE_UID16
7132 
7133 static inline int high2lowuid(int uid)
7134 {
7135     if (uid > 65535)
7136         return 65534;
7137     else
7138         return uid;
7139 }
7140 
7141 static inline int high2lowgid(int gid)
7142 {
7143     if (gid > 65535)
7144         return 65534;
7145     else
7146         return gid;
7147 }
7148 
7149 static inline int low2highuid(int uid)
7150 {
7151     if ((int16_t)uid == -1)
7152         return -1;
7153     else
7154         return uid;
7155 }
7156 
7157 static inline int low2highgid(int gid)
7158 {
7159     if ((int16_t)gid == -1)
7160         return -1;
7161     else
7162         return gid;
7163 }
7164 static inline int tswapid(int id)
7165 {
7166     return tswap16(id);
7167 }
7168 
7169 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7170 
7171 #else /* !USE_UID16 */
7172 static inline int high2lowuid(int uid)
7173 {
7174     return uid;
7175 }
7176 static inline int high2lowgid(int gid)
7177 {
7178     return gid;
7179 }
7180 static inline int low2highuid(int uid)
7181 {
7182     return uid;
7183 }
7184 static inline int low2highgid(int gid)
7185 {
7186     return gid;
7187 }
7188 static inline int tswapid(int id)
7189 {
7190     return tswap32(id);
7191 }
7192 
7193 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7194 
7195 #endif /* USE_UID16 */
7196 
7197 /* We must do direct syscalls for setting UID/GID, because we want to
7198  * implement the Linux system call semantics of "change only for this thread",
7199  * not the libc/POSIX semantics of "change for all threads in process".
7200  * (See http://ewontfix.com/17/ for more details.)
7201  * We use the 32-bit version of the syscalls if present; if it is not
7202  * then either the host architecture supports 32-bit UIDs natively with
7203  * the standard syscall, or the 16-bit UID is the best we can do.
7204  */
7205 #ifdef __NR_setuid32
7206 #define __NR_sys_setuid __NR_setuid32
7207 #else
7208 #define __NR_sys_setuid __NR_setuid
7209 #endif
7210 #ifdef __NR_setgid32
7211 #define __NR_sys_setgid __NR_setgid32
7212 #else
7213 #define __NR_sys_setgid __NR_setgid
7214 #endif
7215 #ifdef __NR_setresuid32
7216 #define __NR_sys_setresuid __NR_setresuid32
7217 #else
7218 #define __NR_sys_setresuid __NR_setresuid
7219 #endif
7220 #ifdef __NR_setresgid32
7221 #define __NR_sys_setresgid __NR_setresgid32
7222 #else
7223 #define __NR_sys_setresgid __NR_setresgid
7224 #endif
7225 
7226 _syscall1(int, sys_setuid, uid_t, uid)
7227 _syscall1(int, sys_setgid, gid_t, gid)
7228 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7229 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7230 
7231 void syscall_init(void)
7232 {
7233     IOCTLEntry *ie;
7234     const argtype *arg_type;
7235     int size;
7236 
7237     thunk_init(STRUCT_MAX);
7238 
7239 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7240 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7241 #include "syscall_types.h"
7242 #undef STRUCT
7243 #undef STRUCT_SPECIAL
7244 
7245     /* we patch the ioctl size if necessary. We rely on the fact that
7246        no ioctl has all the bits at '1' in the size field */
7247     ie = ioctl_entries;
7248     while (ie->target_cmd != 0) {
7249         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7250             TARGET_IOC_SIZEMASK) {
7251             arg_type = ie->arg_type;
7252             if (arg_type[0] != TYPE_PTR) {
7253                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7254                         ie->target_cmd);
7255                 exit(1);
7256             }
7257             arg_type++;
7258             size = thunk_type_size(arg_type, 0);
7259             ie->target_cmd = (ie->target_cmd &
7260                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7261                 (size << TARGET_IOC_SIZESHIFT);
7262         }
7263 
7264         /* automatic consistency check if same arch */
7265 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7266     (defined(__x86_64__) && defined(TARGET_X86_64))
7267         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7268             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7269                     ie->name, ie->target_cmd, ie->host_cmd);
7270         }
7271 #endif
7272         ie++;
7273     }
7274 }
7275 
7276 #ifdef TARGET_NR_truncate64
7277 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7278                                          abi_long arg2,
7279                                          abi_long arg3,
7280                                          abi_long arg4)
7281 {
7282     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7283         arg2 = arg3;
7284         arg3 = arg4;
7285     }
7286     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7287 }
7288 #endif
7289 
7290 #ifdef TARGET_NR_ftruncate64
7291 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7292                                           abi_long arg2,
7293                                           abi_long arg3,
7294                                           abi_long arg4)
7295 {
7296     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7297         arg2 = arg3;
7298         arg3 = arg4;
7299     }
7300     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7301 }
7302 #endif
7303 
7304 #if defined(TARGET_NR_timer_settime) || \
7305     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7306 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7307                                                  abi_ulong target_addr)
7308 {
7309     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7310                                 offsetof(struct target_itimerspec,
7311                                          it_interval)) ||
7312         target_to_host_timespec(&host_its->it_value, target_addr +
7313                                 offsetof(struct target_itimerspec,
7314                                          it_value))) {
7315         return -TARGET_EFAULT;
7316     }
7317 
7318     return 0;
7319 }
7320 #endif
7321 
7322 #if defined(TARGET_NR_timer_settime64) || \
7323     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7324 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7325                                                    abi_ulong target_addr)
7326 {
7327     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7328                                   offsetof(struct target__kernel_itimerspec,
7329                                            it_interval)) ||
7330         target_to_host_timespec64(&host_its->it_value, target_addr +
7331                                   offsetof(struct target__kernel_itimerspec,
7332                                            it_value))) {
7333         return -TARGET_EFAULT;
7334     }
7335 
7336     return 0;
7337 }
7338 #endif
7339 
7340 #if ((defined(TARGET_NR_timerfd_gettime) || \
7341       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7342       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7343 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7344                                                  struct itimerspec *host_its)
7345 {
7346     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7347                                                        it_interval),
7348                                 &host_its->it_interval) ||
7349         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7350                                                        it_value),
7351                                 &host_its->it_value)) {
7352         return -TARGET_EFAULT;
7353     }
7354     return 0;
7355 }
7356 #endif
7357 
7358 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7359       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7360       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7361 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7362                                                    struct itimerspec *host_its)
7363 {
7364     if (host_to_target_timespec64(target_addr +
7365                                   offsetof(struct target__kernel_itimerspec,
7366                                            it_interval),
7367                                   &host_its->it_interval) ||
7368         host_to_target_timespec64(target_addr +
7369                                   offsetof(struct target__kernel_itimerspec,
7370                                            it_value),
7371                                   &host_its->it_value)) {
7372         return -TARGET_EFAULT;
7373     }
7374     return 0;
7375 }
7376 #endif
7377 
7378 #if defined(TARGET_NR_adjtimex) || \
7379     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7380 static inline abi_long target_to_host_timex(struct timex *host_tx,
7381                                             abi_long target_addr)
7382 {
7383     struct target_timex *target_tx;
7384 
7385     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7386         return -TARGET_EFAULT;
7387     }
7388 
7389     __get_user(host_tx->modes, &target_tx->modes);
7390     __get_user(host_tx->offset, &target_tx->offset);
7391     __get_user(host_tx->freq, &target_tx->freq);
7392     __get_user(host_tx->maxerror, &target_tx->maxerror);
7393     __get_user(host_tx->esterror, &target_tx->esterror);
7394     __get_user(host_tx->status, &target_tx->status);
7395     __get_user(host_tx->constant, &target_tx->constant);
7396     __get_user(host_tx->precision, &target_tx->precision);
7397     __get_user(host_tx->tolerance, &target_tx->tolerance);
7398     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7399     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7400     __get_user(host_tx->tick, &target_tx->tick);
7401     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7402     __get_user(host_tx->jitter, &target_tx->jitter);
7403     __get_user(host_tx->shift, &target_tx->shift);
7404     __get_user(host_tx->stabil, &target_tx->stabil);
7405     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7406     __get_user(host_tx->calcnt, &target_tx->calcnt);
7407     __get_user(host_tx->errcnt, &target_tx->errcnt);
7408     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7409     __get_user(host_tx->tai, &target_tx->tai);
7410 
7411     unlock_user_struct(target_tx, target_addr, 0);
7412     return 0;
7413 }
7414 
7415 static inline abi_long host_to_target_timex(abi_long target_addr,
7416                                             struct timex *host_tx)
7417 {
7418     struct target_timex *target_tx;
7419 
7420     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7421         return -TARGET_EFAULT;
7422     }
7423 
7424     __put_user(host_tx->modes, &target_tx->modes);
7425     __put_user(host_tx->offset, &target_tx->offset);
7426     __put_user(host_tx->freq, &target_tx->freq);
7427     __put_user(host_tx->maxerror, &target_tx->maxerror);
7428     __put_user(host_tx->esterror, &target_tx->esterror);
7429     __put_user(host_tx->status, &target_tx->status);
7430     __put_user(host_tx->constant, &target_tx->constant);
7431     __put_user(host_tx->precision, &target_tx->precision);
7432     __put_user(host_tx->tolerance, &target_tx->tolerance);
7433     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7434     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7435     __put_user(host_tx->tick, &target_tx->tick);
7436     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7437     __put_user(host_tx->jitter, &target_tx->jitter);
7438     __put_user(host_tx->shift, &target_tx->shift);
7439     __put_user(host_tx->stabil, &target_tx->stabil);
7440     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7441     __put_user(host_tx->calcnt, &target_tx->calcnt);
7442     __put_user(host_tx->errcnt, &target_tx->errcnt);
7443     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7444     __put_user(host_tx->tai, &target_tx->tai);
7445 
7446     unlock_user_struct(target_tx, target_addr, 1);
7447     return 0;
7448 }
7449 #endif
7450 
7451 
7452 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7453 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7454                                               abi_long target_addr)
7455 {
7456     struct target__kernel_timex *target_tx;
7457 
7458     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7459                                  offsetof(struct target__kernel_timex,
7460                                           time))) {
7461         return -TARGET_EFAULT;
7462     }
7463 
7464     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7465         return -TARGET_EFAULT;
7466     }
7467 
7468     __get_user(host_tx->modes, &target_tx->modes);
7469     __get_user(host_tx->offset, &target_tx->offset);
7470     __get_user(host_tx->freq, &target_tx->freq);
7471     __get_user(host_tx->maxerror, &target_tx->maxerror);
7472     __get_user(host_tx->esterror, &target_tx->esterror);
7473     __get_user(host_tx->status, &target_tx->status);
7474     __get_user(host_tx->constant, &target_tx->constant);
7475     __get_user(host_tx->precision, &target_tx->precision);
7476     __get_user(host_tx->tolerance, &target_tx->tolerance);
7477     __get_user(host_tx->tick, &target_tx->tick);
7478     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7479     __get_user(host_tx->jitter, &target_tx->jitter);
7480     __get_user(host_tx->shift, &target_tx->shift);
7481     __get_user(host_tx->stabil, &target_tx->stabil);
7482     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7483     __get_user(host_tx->calcnt, &target_tx->calcnt);
7484     __get_user(host_tx->errcnt, &target_tx->errcnt);
7485     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7486     __get_user(host_tx->tai, &target_tx->tai);
7487 
7488     unlock_user_struct(target_tx, target_addr, 0);
7489     return 0;
7490 }
7491 
7492 static inline abi_long host_to_target_timex64(abi_long target_addr,
7493                                               struct timex *host_tx)
7494 {
7495     struct target__kernel_timex *target_tx;
7496 
7497    if (copy_to_user_timeval64(target_addr +
7498                               offsetof(struct target__kernel_timex, time),
7499                               &host_tx->time)) {
7500         return -TARGET_EFAULT;
7501     }
7502 
7503     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7504         return -TARGET_EFAULT;
7505     }
7506 
7507     __put_user(host_tx->modes, &target_tx->modes);
7508     __put_user(host_tx->offset, &target_tx->offset);
7509     __put_user(host_tx->freq, &target_tx->freq);
7510     __put_user(host_tx->maxerror, &target_tx->maxerror);
7511     __put_user(host_tx->esterror, &target_tx->esterror);
7512     __put_user(host_tx->status, &target_tx->status);
7513     __put_user(host_tx->constant, &target_tx->constant);
7514     __put_user(host_tx->precision, &target_tx->precision);
7515     __put_user(host_tx->tolerance, &target_tx->tolerance);
7516     __put_user(host_tx->tick, &target_tx->tick);
7517     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7518     __put_user(host_tx->jitter, &target_tx->jitter);
7519     __put_user(host_tx->shift, &target_tx->shift);
7520     __put_user(host_tx->stabil, &target_tx->stabil);
7521     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7522     __put_user(host_tx->calcnt, &target_tx->calcnt);
7523     __put_user(host_tx->errcnt, &target_tx->errcnt);
7524     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7525     __put_user(host_tx->tai, &target_tx->tai);
7526 
7527     unlock_user_struct(target_tx, target_addr, 1);
7528     return 0;
7529 }
7530 #endif
7531 
7532 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7533 #define sigev_notify_thread_id _sigev_un._tid
7534 #endif
7535 
7536 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7537                                                abi_ulong target_addr)
7538 {
7539     struct target_sigevent *target_sevp;
7540 
7541     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7542         return -TARGET_EFAULT;
7543     }
7544 
7545     /* This union is awkward on 64 bit systems because it has a 32 bit
7546      * integer and a pointer in it; we follow the conversion approach
7547      * used for handling sigval types in signal.c so the guest should get
7548      * the correct value back even if we did a 64 bit byteswap and it's
7549      * using the 32 bit integer.
7550      */
7551     host_sevp->sigev_value.sival_ptr =
7552         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7553     host_sevp->sigev_signo =
7554         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7555     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7556     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7557 
7558     unlock_user_struct(target_sevp, target_addr, 1);
7559     return 0;
7560 }
7561 
7562 #if defined(TARGET_NR_mlockall)
7563 static inline int target_to_host_mlockall_arg(int arg)
7564 {
7565     int result = 0;
7566 
7567     if (arg & TARGET_MCL_CURRENT) {
7568         result |= MCL_CURRENT;
7569     }
7570     if (arg & TARGET_MCL_FUTURE) {
7571         result |= MCL_FUTURE;
7572     }
7573 #ifdef MCL_ONFAULT
7574     if (arg & TARGET_MCL_ONFAULT) {
7575         result |= MCL_ONFAULT;
7576     }
7577 #endif
7578 
7579     return result;
7580 }
7581 #endif
7582 
7583 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7584      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7585      defined(TARGET_NR_newfstatat))
7586 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7587                                              abi_ulong target_addr,
7588                                              struct stat *host_st)
7589 {
7590 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7591     if (cpu_env->eabi) {
7592         struct target_eabi_stat64 *target_st;
7593 
7594         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7595             return -TARGET_EFAULT;
7596         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7597         __put_user(host_st->st_dev, &target_st->st_dev);
7598         __put_user(host_st->st_ino, &target_st->st_ino);
7599 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7600         __put_user(host_st->st_ino, &target_st->__st_ino);
7601 #endif
7602         __put_user(host_st->st_mode, &target_st->st_mode);
7603         __put_user(host_st->st_nlink, &target_st->st_nlink);
7604         __put_user(host_st->st_uid, &target_st->st_uid);
7605         __put_user(host_st->st_gid, &target_st->st_gid);
7606         __put_user(host_st->st_rdev, &target_st->st_rdev);
7607         __put_user(host_st->st_size, &target_st->st_size);
7608         __put_user(host_st->st_blksize, &target_st->st_blksize);
7609         __put_user(host_st->st_blocks, &target_st->st_blocks);
7610         __put_user(host_st->st_atime, &target_st->target_st_atime);
7611         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7612         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7613 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7614         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7615         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7616         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7617 #endif
7618         unlock_user_struct(target_st, target_addr, 1);
7619     } else
7620 #endif
7621     {
7622 #if defined(TARGET_HAS_STRUCT_STAT64)
7623         struct target_stat64 *target_st;
7624 #else
7625         struct target_stat *target_st;
7626 #endif
7627 
7628         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7629             return -TARGET_EFAULT;
7630         memset(target_st, 0, sizeof(*target_st));
7631         __put_user(host_st->st_dev, &target_st->st_dev);
7632         __put_user(host_st->st_ino, &target_st->st_ino);
7633 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7634         __put_user(host_st->st_ino, &target_st->__st_ino);
7635 #endif
7636         __put_user(host_st->st_mode, &target_st->st_mode);
7637         __put_user(host_st->st_nlink, &target_st->st_nlink);
7638         __put_user(host_st->st_uid, &target_st->st_uid);
7639         __put_user(host_st->st_gid, &target_st->st_gid);
7640         __put_user(host_st->st_rdev, &target_st->st_rdev);
7641         /* XXX: better use of kernel struct */
7642         __put_user(host_st->st_size, &target_st->st_size);
7643         __put_user(host_st->st_blksize, &target_st->st_blksize);
7644         __put_user(host_st->st_blocks, &target_st->st_blocks);
7645         __put_user(host_st->st_atime, &target_st->target_st_atime);
7646         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7647         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7648 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7649         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7650         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7651         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7652 #endif
7653         unlock_user_struct(target_st, target_addr, 1);
7654     }
7655 
7656     return 0;
7657 }
7658 #endif
7659 
7660 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7661 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7662                                             abi_ulong target_addr)
7663 {
7664     struct target_statx *target_stx;
7665 
7666     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7667         return -TARGET_EFAULT;
7668     }
7669     memset(target_stx, 0, sizeof(*target_stx));
7670 
7671     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7672     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7673     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7674     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7675     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7676     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7677     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7678     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7679     __put_user(host_stx->stx_size, &target_stx->stx_size);
7680     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7681     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7682     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7683     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7684     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7685     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7686     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7687     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7688     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7689     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7690     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7691     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7692     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7693     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7694 
7695     unlock_user_struct(target_stx, target_addr, 1);
7696 
7697     return 0;
7698 }
7699 #endif
7700 
7701 static int do_sys_futex(int *uaddr, int op, int val,
7702                          const struct timespec *timeout, int *uaddr2,
7703                          int val3)
7704 {
7705 #if HOST_LONG_BITS == 64
7706 #if defined(__NR_futex)
7707     /* always a 64-bit time_t, it doesn't define _time64 version  */
7708     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7709 
7710 #endif
7711 #else /* HOST_LONG_BITS == 64 */
7712 #if defined(__NR_futex_time64)
7713     if (sizeof(timeout->tv_sec) == 8) {
7714         /* _time64 function on 32bit arch */
7715         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7716     }
7717 #endif
7718 #if defined(__NR_futex)
7719     /* old function on 32bit arch */
7720     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7721 #endif
7722 #endif /* HOST_LONG_BITS == 64 */
7723     g_assert_not_reached();
7724 }
7725 
7726 static int do_safe_futex(int *uaddr, int op, int val,
7727                          const struct timespec *timeout, int *uaddr2,
7728                          int val3)
7729 {
7730 #if HOST_LONG_BITS == 64
7731 #if defined(__NR_futex)
7732     /* always a 64-bit time_t, it doesn't define _time64 version  */
7733     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7734 #endif
7735 #else /* HOST_LONG_BITS == 64 */
7736 #if defined(__NR_futex_time64)
7737     if (sizeof(timeout->tv_sec) == 8) {
7738         /* _time64 function on 32bit arch */
7739         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7740                                            val3));
7741     }
7742 #endif
7743 #if defined(__NR_futex)
7744     /* old function on 32bit arch */
7745     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7746 #endif
7747 #endif /* HOST_LONG_BITS == 64 */
7748     return -TARGET_ENOSYS;
7749 }
7750 
7751 /* ??? Using host futex calls even when target atomic operations
7752    are not really atomic probably breaks things.  However implementing
7753    futexes locally would make futexes shared between multiple processes
7754    tricky.  However they're probably useless because guest atomic
7755    operations won't work either.  */
7756 #if defined(TARGET_NR_futex)
7757 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7758                     target_ulong timeout, target_ulong uaddr2, int val3)
7759 {
7760     struct timespec ts, *pts;
7761     int base_op;
7762 
7763     /* ??? We assume FUTEX_* constants are the same on both host
7764        and target.  */
7765 #ifdef FUTEX_CMD_MASK
7766     base_op = op & FUTEX_CMD_MASK;
7767 #else
7768     base_op = op;
7769 #endif
7770     switch (base_op) {
7771     case FUTEX_WAIT:
7772     case FUTEX_WAIT_BITSET:
7773         if (timeout) {
7774             pts = &ts;
7775             target_to_host_timespec(pts, timeout);
7776         } else {
7777             pts = NULL;
7778         }
7779         return do_safe_futex(g2h(cpu, uaddr),
7780                              op, tswap32(val), pts, NULL, val3);
7781     case FUTEX_WAKE:
7782         return do_safe_futex(g2h(cpu, uaddr),
7783                              op, val, NULL, NULL, 0);
7784     case FUTEX_FD:
7785         return do_safe_futex(g2h(cpu, uaddr),
7786                              op, val, NULL, NULL, 0);
7787     case FUTEX_REQUEUE:
7788     case FUTEX_CMP_REQUEUE:
7789     case FUTEX_WAKE_OP:
7790         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7791            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7792            But the prototype takes a `struct timespec *'; insert casts
7793            to satisfy the compiler.  We do not need to tswap TIMEOUT
7794            since it's not compared to guest memory.  */
7795         pts = (struct timespec *)(uintptr_t) timeout;
7796         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7797                              (base_op == FUTEX_CMP_REQUEUE
7798                               ? tswap32(val3) : val3));
7799     default:
7800         return -TARGET_ENOSYS;
7801     }
7802 }
7803 #endif
7804 
7805 #if defined(TARGET_NR_futex_time64)
7806 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7807                            int val, target_ulong timeout,
7808                            target_ulong uaddr2, int val3)
7809 {
7810     struct timespec ts, *pts;
7811     int base_op;
7812 
7813     /* ??? We assume FUTEX_* constants are the same on both host
7814        and target.  */
7815 #ifdef FUTEX_CMD_MASK
7816     base_op = op & FUTEX_CMD_MASK;
7817 #else
7818     base_op = op;
7819 #endif
7820     switch (base_op) {
7821     case FUTEX_WAIT:
7822     case FUTEX_WAIT_BITSET:
7823         if (timeout) {
7824             pts = &ts;
7825             if (target_to_host_timespec64(pts, timeout)) {
7826                 return -TARGET_EFAULT;
7827             }
7828         } else {
7829             pts = NULL;
7830         }
7831         return do_safe_futex(g2h(cpu, uaddr), op,
7832                              tswap32(val), pts, NULL, val3);
7833     case FUTEX_WAKE:
7834         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7835     case FUTEX_FD:
7836         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7837     case FUTEX_REQUEUE:
7838     case FUTEX_CMP_REQUEUE:
7839     case FUTEX_WAKE_OP:
7840         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7841            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7842            But the prototype takes a `struct timespec *'; insert casts
7843            to satisfy the compiler.  We do not need to tswap TIMEOUT
7844            since it's not compared to guest memory.  */
7845         pts = (struct timespec *)(uintptr_t) timeout;
7846         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7847                              (base_op == FUTEX_CMP_REQUEUE
7848                               ? tswap32(val3) : val3));
7849     default:
7850         return -TARGET_ENOSYS;
7851     }
7852 }
7853 #endif
7854 
7855 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7856 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7857                                      abi_long handle, abi_long mount_id,
7858                                      abi_long flags)
7859 {
7860     struct file_handle *target_fh;
7861     struct file_handle *fh;
7862     int mid = 0;
7863     abi_long ret;
7864     char *name;
7865     unsigned int size, total_size;
7866 
7867     if (get_user_s32(size, handle)) {
7868         return -TARGET_EFAULT;
7869     }
7870 
7871     name = lock_user_string(pathname);
7872     if (!name) {
7873         return -TARGET_EFAULT;
7874     }
7875 
7876     total_size = sizeof(struct file_handle) + size;
7877     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7878     if (!target_fh) {
7879         unlock_user(name, pathname, 0);
7880         return -TARGET_EFAULT;
7881     }
7882 
7883     fh = g_malloc0(total_size);
7884     fh->handle_bytes = size;
7885 
7886     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7887     unlock_user(name, pathname, 0);
7888 
7889     /* man name_to_handle_at(2):
7890      * Other than the use of the handle_bytes field, the caller should treat
7891      * the file_handle structure as an opaque data type
7892      */
7893 
7894     memcpy(target_fh, fh, total_size);
7895     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7896     target_fh->handle_type = tswap32(fh->handle_type);
7897     g_free(fh);
7898     unlock_user(target_fh, handle, total_size);
7899 
7900     if (put_user_s32(mid, mount_id)) {
7901         return -TARGET_EFAULT;
7902     }
7903 
7904     return ret;
7905 
7906 }
7907 #endif
7908 
7909 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7910 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7911                                      abi_long flags)
7912 {
7913     struct file_handle *target_fh;
7914     struct file_handle *fh;
7915     unsigned int size, total_size;
7916     abi_long ret;
7917 
7918     if (get_user_s32(size, handle)) {
7919         return -TARGET_EFAULT;
7920     }
7921 
7922     total_size = sizeof(struct file_handle) + size;
7923     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7924     if (!target_fh) {
7925         return -TARGET_EFAULT;
7926     }
7927 
7928     fh = g_memdup(target_fh, total_size);
7929     fh->handle_bytes = size;
7930     fh->handle_type = tswap32(target_fh->handle_type);
7931 
7932     ret = get_errno(open_by_handle_at(mount_fd, fh,
7933                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7934 
7935     g_free(fh);
7936 
7937     unlock_user(target_fh, handle, total_size);
7938 
7939     return ret;
7940 }
7941 #endif
7942 
7943 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7944 
7945 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7946 {
7947     int host_flags;
7948     target_sigset_t *target_mask;
7949     sigset_t host_mask;
7950     abi_long ret;
7951 
7952     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7953         return -TARGET_EINVAL;
7954     }
7955     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7956         return -TARGET_EFAULT;
7957     }
7958 
7959     target_to_host_sigset(&host_mask, target_mask);
7960 
7961     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7962 
7963     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7964     if (ret >= 0) {
7965         fd_trans_register(ret, &target_signalfd_trans);
7966     }
7967 
7968     unlock_user_struct(target_mask, mask, 0);
7969 
7970     return ret;
7971 }
7972 #endif
7973 
7974 /* Map host to target signal numbers for the wait family of syscalls.
7975    Assume all other status bits are the same.  */
7976 int host_to_target_waitstatus(int status)
7977 {
7978     if (WIFSIGNALED(status)) {
7979         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7980     }
7981     if (WIFSTOPPED(status)) {
7982         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7983                | (status & 0xff);
7984     }
7985     return status;
7986 }
7987 
7988 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7989 {
7990     CPUState *cpu = env_cpu(cpu_env);
7991     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7992     int i;
7993 
7994     for (i = 0; i < bprm->argc; i++) {
7995         size_t len = strlen(bprm->argv[i]) + 1;
7996 
7997         if (write(fd, bprm->argv[i], len) != len) {
7998             return -1;
7999         }
8000     }
8001 
8002     return 0;
8003 }
8004 
8005 static int open_self_maps(CPUArchState *cpu_env, int fd)
8006 {
8007     CPUState *cpu = env_cpu(cpu_env);
8008     TaskState *ts = cpu->opaque;
8009     GSList *map_info = read_self_maps();
8010     GSList *s;
8011     int count;
8012 
8013     for (s = map_info; s; s = g_slist_next(s)) {
8014         MapInfo *e = (MapInfo *) s->data;
8015 
8016         if (h2g_valid(e->start)) {
8017             unsigned long min = e->start;
8018             unsigned long max = e->end;
8019             int flags = page_get_flags(h2g(min));
8020             const char *path;
8021 
8022             max = h2g_valid(max - 1) ?
8023                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8024 
8025             if (page_check_range(h2g(min), max - min, flags) == -1) {
8026                 continue;
8027             }
8028 
8029             if (h2g(min) == ts->info->stack_limit) {
8030                 path = "[stack]";
8031             } else {
8032                 path = e->path;
8033             }
8034 
8035             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8036                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8037                             h2g(min), h2g(max - 1) + 1,
8038                             (flags & PAGE_READ) ? 'r' : '-',
8039                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8040                             (flags & PAGE_EXEC) ? 'x' : '-',
8041                             e->is_priv ? 'p' : 's',
8042                             (uint64_t) e->offset, e->dev, e->inode);
8043             if (path) {
8044                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8045             } else {
8046                 dprintf(fd, "\n");
8047             }
8048         }
8049     }
8050 
8051     free_self_maps(map_info);
8052 
8053 #ifdef TARGET_VSYSCALL_PAGE
8054     /*
8055      * We only support execution from the vsyscall page.
8056      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8057      */
8058     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8059                     " --xp 00000000 00:00 0",
8060                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8061     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8062 #endif
8063 
8064     return 0;
8065 }
8066 
8067 static int open_self_stat(CPUArchState *cpu_env, int fd)
8068 {
8069     CPUState *cpu = env_cpu(cpu_env);
8070     TaskState *ts = cpu->opaque;
8071     g_autoptr(GString) buf = g_string_new(NULL);
8072     int i;
8073 
8074     for (i = 0; i < 44; i++) {
8075         if (i == 0) {
8076             /* pid */
8077             g_string_printf(buf, FMT_pid " ", getpid());
8078         } else if (i == 1) {
8079             /* app name */
8080             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8081             bin = bin ? bin + 1 : ts->bprm->argv[0];
8082             g_string_printf(buf, "(%.15s) ", bin);
8083         } else if (i == 3) {
8084             /* ppid */
8085             g_string_printf(buf, FMT_pid " ", getppid());
8086         } else if (i == 21) {
8087             /* starttime */
8088             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8089         } else if (i == 27) {
8090             /* stack bottom */
8091             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8092         } else {
8093             /* for the rest, there is MasterCard */
8094             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8095         }
8096 
8097         if (write(fd, buf->str, buf->len) != buf->len) {
8098             return -1;
8099         }
8100     }
8101 
8102     return 0;
8103 }
8104 
8105 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8106 {
8107     CPUState *cpu = env_cpu(cpu_env);
8108     TaskState *ts = cpu->opaque;
8109     abi_ulong auxv = ts->info->saved_auxv;
8110     abi_ulong len = ts->info->auxv_len;
8111     char *ptr;
8112 
8113     /*
8114      * Auxiliary vector is stored in target process stack.
8115      * read in whole auxv vector and copy it to file
8116      */
8117     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8118     if (ptr != NULL) {
8119         while (len > 0) {
8120             ssize_t r;
8121             r = write(fd, ptr, len);
8122             if (r <= 0) {
8123                 break;
8124             }
8125             len -= r;
8126             ptr += r;
8127         }
8128         lseek(fd, 0, SEEK_SET);
8129         unlock_user(ptr, auxv, len);
8130     }
8131 
8132     return 0;
8133 }
8134 
8135 static int is_proc_myself(const char *filename, const char *entry)
8136 {
8137     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8138         filename += strlen("/proc/");
8139         if (!strncmp(filename, "self/", strlen("self/"))) {
8140             filename += strlen("self/");
8141         } else if (*filename >= '1' && *filename <= '9') {
8142             char myself[80];
8143             snprintf(myself, sizeof(myself), "%d/", getpid());
8144             if (!strncmp(filename, myself, strlen(myself))) {
8145                 filename += strlen(myself);
8146             } else {
8147                 return 0;
8148             }
8149         } else {
8150             return 0;
8151         }
8152         if (!strcmp(filename, entry)) {
8153             return 1;
8154         }
8155     }
8156     return 0;
8157 }
8158 
8159 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8160     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8161 static int is_proc(const char *filename, const char *entry)
8162 {
8163     return strcmp(filename, entry) == 0;
8164 }
8165 #endif
8166 
8167 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8168 static int open_net_route(CPUArchState *cpu_env, int fd)
8169 {
8170     FILE *fp;
8171     char *line = NULL;
8172     size_t len = 0;
8173     ssize_t read;
8174 
8175     fp = fopen("/proc/net/route", "r");
8176     if (fp == NULL) {
8177         return -1;
8178     }
8179 
8180     /* read header */
8181 
8182     read = getline(&line, &len, fp);
8183     dprintf(fd, "%s", line);
8184 
8185     /* read routes */
8186 
8187     while ((read = getline(&line, &len, fp)) != -1) {
8188         char iface[16];
8189         uint32_t dest, gw, mask;
8190         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8191         int fields;
8192 
8193         fields = sscanf(line,
8194                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8195                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8196                         &mask, &mtu, &window, &irtt);
8197         if (fields != 11) {
8198             continue;
8199         }
8200         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8201                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8202                 metric, tswap32(mask), mtu, window, irtt);
8203     }
8204 
8205     free(line);
8206     fclose(fp);
8207 
8208     return 0;
8209 }
8210 #endif
8211 
8212 #if defined(TARGET_SPARC)
8213 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8214 {
8215     dprintf(fd, "type\t\t: sun4u\n");
8216     return 0;
8217 }
8218 #endif
8219 
8220 #if defined(TARGET_HPPA)
8221 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8222 {
8223     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8224     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8225     dprintf(fd, "capabilities\t: os32\n");
8226     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8227     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8228     return 0;
8229 }
8230 #endif
8231 
8232 #if defined(TARGET_M68K)
8233 static int open_hardware(CPUArchState *cpu_env, int fd)
8234 {
8235     dprintf(fd, "Model:\t\tqemu-m68k\n");
8236     return 0;
8237 }
8238 #endif
8239 
8240 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8241 {
8242     struct fake_open {
8243         const char *filename;
8244         int (*fill)(CPUArchState *cpu_env, int fd);
8245         int (*cmp)(const char *s1, const char *s2);
8246     };
8247     const struct fake_open *fake_open;
8248     static const struct fake_open fakes[] = {
8249         { "maps", open_self_maps, is_proc_myself },
8250         { "stat", open_self_stat, is_proc_myself },
8251         { "auxv", open_self_auxv, is_proc_myself },
8252         { "cmdline", open_self_cmdline, is_proc_myself },
8253 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8254         { "/proc/net/route", open_net_route, is_proc },
8255 #endif
8256 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8257         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8258 #endif
8259 #if defined(TARGET_M68K)
8260         { "/proc/hardware", open_hardware, is_proc },
8261 #endif
8262         { NULL, NULL, NULL }
8263     };
8264 
8265     if (is_proc_myself(pathname, "exe")) {
8266         int execfd = qemu_getauxval(AT_EXECFD);
8267         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8268     }
8269 
8270     for (fake_open = fakes; fake_open->filename; fake_open++) {
8271         if (fake_open->cmp(pathname, fake_open->filename)) {
8272             break;
8273         }
8274     }
8275 
8276     if (fake_open->filename) {
8277         const char *tmpdir;
8278         char filename[PATH_MAX];
8279         int fd, r;
8280 
8281         fd = memfd_create("qemu-open", 0);
8282         if (fd < 0) {
8283             if (errno != ENOSYS) {
8284                 return fd;
8285             }
8286             /* create temporary file to map stat to */
8287             tmpdir = getenv("TMPDIR");
8288             if (!tmpdir)
8289                 tmpdir = "/tmp";
8290             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8291             fd = mkstemp(filename);
8292             if (fd < 0) {
8293                 return fd;
8294             }
8295             unlink(filename);
8296         }
8297 
8298         if ((r = fake_open->fill(cpu_env, fd))) {
8299             int e = errno;
8300             close(fd);
8301             errno = e;
8302             return r;
8303         }
8304         lseek(fd, 0, SEEK_SET);
8305 
8306         return fd;
8307     }
8308 
8309     return safe_openat(dirfd, path(pathname), flags, mode);
8310 }
8311 
8312 #define TIMER_MAGIC 0x0caf0000
8313 #define TIMER_MAGIC_MASK 0xffff0000
8314 
8315 /* Convert QEMU provided timer ID back to internal 16bit index format */
8316 static target_timer_t get_timer_id(abi_long arg)
8317 {
8318     target_timer_t timerid = arg;
8319 
8320     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8321         return -TARGET_EINVAL;
8322     }
8323 
8324     timerid &= 0xffff;
8325 
8326     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8327         return -TARGET_EINVAL;
8328     }
8329 
8330     return timerid;
8331 }
8332 
8333 static int target_to_host_cpu_mask(unsigned long *host_mask,
8334                                    size_t host_size,
8335                                    abi_ulong target_addr,
8336                                    size_t target_size)
8337 {
8338     unsigned target_bits = sizeof(abi_ulong) * 8;
8339     unsigned host_bits = sizeof(*host_mask) * 8;
8340     abi_ulong *target_mask;
8341     unsigned i, j;
8342 
8343     assert(host_size >= target_size);
8344 
8345     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8346     if (!target_mask) {
8347         return -TARGET_EFAULT;
8348     }
8349     memset(host_mask, 0, host_size);
8350 
8351     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8352         unsigned bit = i * target_bits;
8353         abi_ulong val;
8354 
8355         __get_user(val, &target_mask[i]);
8356         for (j = 0; j < target_bits; j++, bit++) {
8357             if (val & (1UL << j)) {
8358                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8359             }
8360         }
8361     }
8362 
8363     unlock_user(target_mask, target_addr, 0);
8364     return 0;
8365 }
8366 
8367 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8368                                    size_t host_size,
8369                                    abi_ulong target_addr,
8370                                    size_t target_size)
8371 {
8372     unsigned target_bits = sizeof(abi_ulong) * 8;
8373     unsigned host_bits = sizeof(*host_mask) * 8;
8374     abi_ulong *target_mask;
8375     unsigned i, j;
8376 
8377     assert(host_size >= target_size);
8378 
8379     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8380     if (!target_mask) {
8381         return -TARGET_EFAULT;
8382     }
8383 
8384     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8385         unsigned bit = i * target_bits;
8386         abi_ulong val = 0;
8387 
8388         for (j = 0; j < target_bits; j++, bit++) {
8389             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8390                 val |= 1UL << j;
8391             }
8392         }
8393         __put_user(val, &target_mask[i]);
8394     }
8395 
8396     unlock_user(target_mask, target_addr, target_size);
8397     return 0;
8398 }
8399 
8400 #ifdef TARGET_NR_getdents
8401 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8402 {
8403     g_autofree void *hdirp = NULL;
8404     void *tdirp;
8405     int hlen, hoff, toff;
8406     int hreclen, treclen;
8407     off64_t prev_diroff = 0;
8408 
8409     hdirp = g_try_malloc(count);
8410     if (!hdirp) {
8411         return -TARGET_ENOMEM;
8412     }
8413 
8414 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8415     hlen = sys_getdents(dirfd, hdirp, count);
8416 #else
8417     hlen = sys_getdents64(dirfd, hdirp, count);
8418 #endif
8419 
8420     hlen = get_errno(hlen);
8421     if (is_error(hlen)) {
8422         return hlen;
8423     }
8424 
8425     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8426     if (!tdirp) {
8427         return -TARGET_EFAULT;
8428     }
8429 
8430     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8431 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8432         struct linux_dirent *hde = hdirp + hoff;
8433 #else
8434         struct linux_dirent64 *hde = hdirp + hoff;
8435 #endif
8436         struct target_dirent *tde = tdirp + toff;
8437         int namelen;
8438         uint8_t type;
8439 
8440         namelen = strlen(hde->d_name);
8441         hreclen = hde->d_reclen;
8442         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8443         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8444 
8445         if (toff + treclen > count) {
8446             /*
8447              * If the host struct is smaller than the target struct, or
8448              * requires less alignment and thus packs into less space,
8449              * then the host can return more entries than we can pass
8450              * on to the guest.
8451              */
8452             if (toff == 0) {
8453                 toff = -TARGET_EINVAL; /* result buffer is too small */
8454                 break;
8455             }
8456             /*
8457              * Return what we have, resetting the file pointer to the
8458              * location of the first record not returned.
8459              */
8460             lseek64(dirfd, prev_diroff, SEEK_SET);
8461             break;
8462         }
8463 
8464         prev_diroff = hde->d_off;
8465         tde->d_ino = tswapal(hde->d_ino);
8466         tde->d_off = tswapal(hde->d_off);
8467         tde->d_reclen = tswap16(treclen);
8468         memcpy(tde->d_name, hde->d_name, namelen + 1);
8469 
8470         /*
8471          * The getdents type is in what was formerly a padding byte at the
8472          * end of the structure.
8473          */
8474 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8475         type = *((uint8_t *)hde + hreclen - 1);
8476 #else
8477         type = hde->d_type;
8478 #endif
8479         *((uint8_t *)tde + treclen - 1) = type;
8480     }
8481 
8482     unlock_user(tdirp, arg2, toff);
8483     return toff;
8484 }
8485 #endif /* TARGET_NR_getdents */
8486 
8487 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8488 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8489 {
8490     g_autofree void *hdirp = NULL;
8491     void *tdirp;
8492     int hlen, hoff, toff;
8493     int hreclen, treclen;
8494     off64_t prev_diroff = 0;
8495 
8496     hdirp = g_try_malloc(count);
8497     if (!hdirp) {
8498         return -TARGET_ENOMEM;
8499     }
8500 
8501     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8502     if (is_error(hlen)) {
8503         return hlen;
8504     }
8505 
8506     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8507     if (!tdirp) {
8508         return -TARGET_EFAULT;
8509     }
8510 
8511     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8512         struct linux_dirent64 *hde = hdirp + hoff;
8513         struct target_dirent64 *tde = tdirp + toff;
8514         int namelen;
8515 
8516         namelen = strlen(hde->d_name) + 1;
8517         hreclen = hde->d_reclen;
8518         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8519         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8520 
8521         if (toff + treclen > count) {
8522             /*
8523              * If the host struct is smaller than the target struct, or
8524              * requires less alignment and thus packs into less space,
8525              * then the host can return more entries than we can pass
8526              * on to the guest.
8527              */
8528             if (toff == 0) {
8529                 toff = -TARGET_EINVAL; /* result buffer is too small */
8530                 break;
8531             }
8532             /*
8533              * Return what we have, resetting the file pointer to the
8534              * location of the first record not returned.
8535              */
8536             lseek64(dirfd, prev_diroff, SEEK_SET);
8537             break;
8538         }
8539 
8540         prev_diroff = hde->d_off;
8541         tde->d_ino = tswap64(hde->d_ino);
8542         tde->d_off = tswap64(hde->d_off);
8543         tde->d_reclen = tswap16(treclen);
8544         tde->d_type = hde->d_type;
8545         memcpy(tde->d_name, hde->d_name, namelen);
8546     }
8547 
8548     unlock_user(tdirp, arg2, toff);
8549     return toff;
8550 }
8551 #endif /* TARGET_NR_getdents64 */
8552 
8553 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8554 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8555 #endif
8556 
8557 /* This is an internal helper for do_syscall so that it is easier
8558  * to have a single return point, so that actions, such as logging
8559  * of syscall results, can be performed.
8560  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8561  */
8562 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8563                             abi_long arg2, abi_long arg3, abi_long arg4,
8564                             abi_long arg5, abi_long arg6, abi_long arg7,
8565                             abi_long arg8)
8566 {
8567     CPUState *cpu = env_cpu(cpu_env);
8568     abi_long ret;
8569 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8570     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8571     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8572     || defined(TARGET_NR_statx)
8573     struct stat st;
8574 #endif
8575 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8576     || defined(TARGET_NR_fstatfs)
8577     struct statfs stfs;
8578 #endif
8579     void *p;
8580 
8581     switch(num) {
8582     case TARGET_NR_exit:
8583         /* In old applications this may be used to implement _exit(2).
8584            However in threaded applications it is used for thread termination,
8585            and _exit_group is used for application termination.
8586            Do thread termination if we have more then one thread.  */
8587 
8588         if (block_signals()) {
8589             return -QEMU_ERESTARTSYS;
8590         }
8591 
8592         pthread_mutex_lock(&clone_lock);
8593 
8594         if (CPU_NEXT(first_cpu)) {
8595             TaskState *ts = cpu->opaque;
8596 
8597             if (ts->child_tidptr) {
8598                 put_user_u32(0, ts->child_tidptr);
8599                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8600                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8601             }
8602 
8603             object_unparent(OBJECT(cpu));
8604             object_unref(OBJECT(cpu));
8605             /*
8606              * At this point the CPU should be unrealized and removed
8607              * from cpu lists. We can clean-up the rest of the thread
8608              * data without the lock held.
8609              */
8610 
8611             pthread_mutex_unlock(&clone_lock);
8612 
8613             thread_cpu = NULL;
8614             g_free(ts);
8615             rcu_unregister_thread();
8616             pthread_exit(NULL);
8617         }
8618 
8619         pthread_mutex_unlock(&clone_lock);
8620         preexit_cleanup(cpu_env, arg1);
8621         _exit(arg1);
8622         return 0; /* avoid warning */
8623     case TARGET_NR_read:
8624         if (arg2 == 0 && arg3 == 0) {
8625             return get_errno(safe_read(arg1, 0, 0));
8626         } else {
8627             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8628                 return -TARGET_EFAULT;
8629             ret = get_errno(safe_read(arg1, p, arg3));
8630             if (ret >= 0 &&
8631                 fd_trans_host_to_target_data(arg1)) {
8632                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8633             }
8634             unlock_user(p, arg2, ret);
8635         }
8636         return ret;
8637     case TARGET_NR_write:
8638         if (arg2 == 0 && arg3 == 0) {
8639             return get_errno(safe_write(arg1, 0, 0));
8640         }
8641         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8642             return -TARGET_EFAULT;
8643         if (fd_trans_target_to_host_data(arg1)) {
8644             void *copy = g_malloc(arg3);
8645             memcpy(copy, p, arg3);
8646             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8647             if (ret >= 0) {
8648                 ret = get_errno(safe_write(arg1, copy, ret));
8649             }
8650             g_free(copy);
8651         } else {
8652             ret = get_errno(safe_write(arg1, p, arg3));
8653         }
8654         unlock_user(p, arg2, 0);
8655         return ret;
8656 
8657 #ifdef TARGET_NR_open
8658     case TARGET_NR_open:
8659         if (!(p = lock_user_string(arg1)))
8660             return -TARGET_EFAULT;
8661         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8662                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8663                                   arg3));
8664         fd_trans_unregister(ret);
8665         unlock_user(p, arg1, 0);
8666         return ret;
8667 #endif
8668     case TARGET_NR_openat:
8669         if (!(p = lock_user_string(arg2)))
8670             return -TARGET_EFAULT;
8671         ret = get_errno(do_openat(cpu_env, arg1, p,
8672                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8673                                   arg4));
8674         fd_trans_unregister(ret);
8675         unlock_user(p, arg2, 0);
8676         return ret;
8677 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8678     case TARGET_NR_name_to_handle_at:
8679         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8680         return ret;
8681 #endif
8682 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8683     case TARGET_NR_open_by_handle_at:
8684         ret = do_open_by_handle_at(arg1, arg2, arg3);
8685         fd_trans_unregister(ret);
8686         return ret;
8687 #endif
8688     case TARGET_NR_close:
8689         fd_trans_unregister(arg1);
8690         return get_errno(close(arg1));
8691 
8692     case TARGET_NR_brk:
8693         return do_brk(arg1);
8694 #ifdef TARGET_NR_fork
8695     case TARGET_NR_fork:
8696         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8697 #endif
8698 #ifdef TARGET_NR_waitpid
8699     case TARGET_NR_waitpid:
8700         {
8701             int status;
8702             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8703             if (!is_error(ret) && arg2 && ret
8704                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8705                 return -TARGET_EFAULT;
8706         }
8707         return ret;
8708 #endif
8709 #ifdef TARGET_NR_waitid
8710     case TARGET_NR_waitid:
8711         {
8712             siginfo_t info;
8713             info.si_pid = 0;
8714             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8715             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8716                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8717                     return -TARGET_EFAULT;
8718                 host_to_target_siginfo(p, &info);
8719                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8720             }
8721         }
8722         return ret;
8723 #endif
8724 #ifdef TARGET_NR_creat /* not on alpha */
8725     case TARGET_NR_creat:
8726         if (!(p = lock_user_string(arg1)))
8727             return -TARGET_EFAULT;
8728         ret = get_errno(creat(p, arg2));
8729         fd_trans_unregister(ret);
8730         unlock_user(p, arg1, 0);
8731         return ret;
8732 #endif
8733 #ifdef TARGET_NR_link
8734     case TARGET_NR_link:
8735         {
8736             void * p2;
8737             p = lock_user_string(arg1);
8738             p2 = lock_user_string(arg2);
8739             if (!p || !p2)
8740                 ret = -TARGET_EFAULT;
8741             else
8742                 ret = get_errno(link(p, p2));
8743             unlock_user(p2, arg2, 0);
8744             unlock_user(p, arg1, 0);
8745         }
8746         return ret;
8747 #endif
8748 #if defined(TARGET_NR_linkat)
8749     case TARGET_NR_linkat:
8750         {
8751             void * p2 = NULL;
8752             if (!arg2 || !arg4)
8753                 return -TARGET_EFAULT;
8754             p  = lock_user_string(arg2);
8755             p2 = lock_user_string(arg4);
8756             if (!p || !p2)
8757                 ret = -TARGET_EFAULT;
8758             else
8759                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8760             unlock_user(p, arg2, 0);
8761             unlock_user(p2, arg4, 0);
8762         }
8763         return ret;
8764 #endif
8765 #ifdef TARGET_NR_unlink
8766     case TARGET_NR_unlink:
8767         if (!(p = lock_user_string(arg1)))
8768             return -TARGET_EFAULT;
8769         ret = get_errno(unlink(p));
8770         unlock_user(p, arg1, 0);
8771         return ret;
8772 #endif
8773 #if defined(TARGET_NR_unlinkat)
8774     case TARGET_NR_unlinkat:
8775         if (!(p = lock_user_string(arg2)))
8776             return -TARGET_EFAULT;
8777         ret = get_errno(unlinkat(arg1, p, arg3));
8778         unlock_user(p, arg2, 0);
8779         return ret;
8780 #endif
8781     case TARGET_NR_execve:
8782         {
8783             char **argp, **envp;
8784             int argc, envc;
8785             abi_ulong gp;
8786             abi_ulong guest_argp;
8787             abi_ulong guest_envp;
8788             abi_ulong addr;
8789             char **q;
8790 
8791             argc = 0;
8792             guest_argp = arg2;
8793             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8794                 if (get_user_ual(addr, gp))
8795                     return -TARGET_EFAULT;
8796                 if (!addr)
8797                     break;
8798                 argc++;
8799             }
8800             envc = 0;
8801             guest_envp = arg3;
8802             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8803                 if (get_user_ual(addr, gp))
8804                     return -TARGET_EFAULT;
8805                 if (!addr)
8806                     break;
8807                 envc++;
8808             }
8809 
8810             argp = g_new0(char *, argc + 1);
8811             envp = g_new0(char *, envc + 1);
8812 
8813             for (gp = guest_argp, q = argp; gp;
8814                   gp += sizeof(abi_ulong), q++) {
8815                 if (get_user_ual(addr, gp))
8816                     goto execve_efault;
8817                 if (!addr)
8818                     break;
8819                 if (!(*q = lock_user_string(addr)))
8820                     goto execve_efault;
8821             }
8822             *q = NULL;
8823 
8824             for (gp = guest_envp, q = envp; gp;
8825                   gp += sizeof(abi_ulong), q++) {
8826                 if (get_user_ual(addr, gp))
8827                     goto execve_efault;
8828                 if (!addr)
8829                     break;
8830                 if (!(*q = lock_user_string(addr)))
8831                     goto execve_efault;
8832             }
8833             *q = NULL;
8834 
8835             if (!(p = lock_user_string(arg1)))
8836                 goto execve_efault;
8837             /* Although execve() is not an interruptible syscall it is
8838              * a special case where we must use the safe_syscall wrapper:
8839              * if we allow a signal to happen before we make the host
8840              * syscall then we will 'lose' it, because at the point of
8841              * execve the process leaves QEMU's control. So we use the
8842              * safe syscall wrapper to ensure that we either take the
8843              * signal as a guest signal, or else it does not happen
8844              * before the execve completes and makes it the other
8845              * program's problem.
8846              */
8847             ret = get_errno(safe_execve(p, argp, envp));
8848             unlock_user(p, arg1, 0);
8849 
8850             goto execve_end;
8851 
8852         execve_efault:
8853             ret = -TARGET_EFAULT;
8854 
8855         execve_end:
8856             for (gp = guest_argp, q = argp; *q;
8857                   gp += sizeof(abi_ulong), q++) {
8858                 if (get_user_ual(addr, gp)
8859                     || !addr)
8860                     break;
8861                 unlock_user(*q, addr, 0);
8862             }
8863             for (gp = guest_envp, q = envp; *q;
8864                   gp += sizeof(abi_ulong), q++) {
8865                 if (get_user_ual(addr, gp)
8866                     || !addr)
8867                     break;
8868                 unlock_user(*q, addr, 0);
8869             }
8870 
8871             g_free(argp);
8872             g_free(envp);
8873         }
8874         return ret;
8875     case TARGET_NR_chdir:
8876         if (!(p = lock_user_string(arg1)))
8877             return -TARGET_EFAULT;
8878         ret = get_errno(chdir(p));
8879         unlock_user(p, arg1, 0);
8880         return ret;
8881 #ifdef TARGET_NR_time
8882     case TARGET_NR_time:
8883         {
8884             time_t host_time;
8885             ret = get_errno(time(&host_time));
8886             if (!is_error(ret)
8887                 && arg1
8888                 && put_user_sal(host_time, arg1))
8889                 return -TARGET_EFAULT;
8890         }
8891         return ret;
8892 #endif
8893 #ifdef TARGET_NR_mknod
8894     case TARGET_NR_mknod:
8895         if (!(p = lock_user_string(arg1)))
8896             return -TARGET_EFAULT;
8897         ret = get_errno(mknod(p, arg2, arg3));
8898         unlock_user(p, arg1, 0);
8899         return ret;
8900 #endif
8901 #if defined(TARGET_NR_mknodat)
8902     case TARGET_NR_mknodat:
8903         if (!(p = lock_user_string(arg2)))
8904             return -TARGET_EFAULT;
8905         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8906         unlock_user(p, arg2, 0);
8907         return ret;
8908 #endif
8909 #ifdef TARGET_NR_chmod
8910     case TARGET_NR_chmod:
8911         if (!(p = lock_user_string(arg1)))
8912             return -TARGET_EFAULT;
8913         ret = get_errno(chmod(p, arg2));
8914         unlock_user(p, arg1, 0);
8915         return ret;
8916 #endif
8917 #ifdef TARGET_NR_lseek
8918     case TARGET_NR_lseek:
8919         return get_errno(lseek(arg1, arg2, arg3));
8920 #endif
8921 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8922     /* Alpha specific */
8923     case TARGET_NR_getxpid:
8924         cpu_env->ir[IR_A4] = getppid();
8925         return get_errno(getpid());
8926 #endif
8927 #ifdef TARGET_NR_getpid
8928     case TARGET_NR_getpid:
8929         return get_errno(getpid());
8930 #endif
8931     case TARGET_NR_mount:
8932         {
8933             /* need to look at the data field */
8934             void *p2, *p3;
8935 
8936             if (arg1) {
8937                 p = lock_user_string(arg1);
8938                 if (!p) {
8939                     return -TARGET_EFAULT;
8940                 }
8941             } else {
8942                 p = NULL;
8943             }
8944 
8945             p2 = lock_user_string(arg2);
8946             if (!p2) {
8947                 if (arg1) {
8948                     unlock_user(p, arg1, 0);
8949                 }
8950                 return -TARGET_EFAULT;
8951             }
8952 
8953             if (arg3) {
8954                 p3 = lock_user_string(arg3);
8955                 if (!p3) {
8956                     if (arg1) {
8957                         unlock_user(p, arg1, 0);
8958                     }
8959                     unlock_user(p2, arg2, 0);
8960                     return -TARGET_EFAULT;
8961                 }
8962             } else {
8963                 p3 = NULL;
8964             }
8965 
8966             /* FIXME - arg5 should be locked, but it isn't clear how to
8967              * do that since it's not guaranteed to be a NULL-terminated
8968              * string.
8969              */
8970             if (!arg5) {
8971                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8972             } else {
8973                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8974             }
8975             ret = get_errno(ret);
8976 
8977             if (arg1) {
8978                 unlock_user(p, arg1, 0);
8979             }
8980             unlock_user(p2, arg2, 0);
8981             if (arg3) {
8982                 unlock_user(p3, arg3, 0);
8983             }
8984         }
8985         return ret;
8986 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8987 #if defined(TARGET_NR_umount)
8988     case TARGET_NR_umount:
8989 #endif
8990 #if defined(TARGET_NR_oldumount)
8991     case TARGET_NR_oldumount:
8992 #endif
8993         if (!(p = lock_user_string(arg1)))
8994             return -TARGET_EFAULT;
8995         ret = get_errno(umount(p));
8996         unlock_user(p, arg1, 0);
8997         return ret;
8998 #endif
8999 #ifdef TARGET_NR_stime /* not on alpha */
9000     case TARGET_NR_stime:
9001         {
9002             struct timespec ts;
9003             ts.tv_nsec = 0;
9004             if (get_user_sal(ts.tv_sec, arg1)) {
9005                 return -TARGET_EFAULT;
9006             }
9007             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9008         }
9009 #endif
9010 #ifdef TARGET_NR_alarm /* not on alpha */
9011     case TARGET_NR_alarm:
9012         return alarm(arg1);
9013 #endif
9014 #ifdef TARGET_NR_pause /* not on alpha */
9015     case TARGET_NR_pause:
9016         if (!block_signals()) {
9017             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9018         }
9019         return -TARGET_EINTR;
9020 #endif
9021 #ifdef TARGET_NR_utime
9022     case TARGET_NR_utime:
9023         {
9024             struct utimbuf tbuf, *host_tbuf;
9025             struct target_utimbuf *target_tbuf;
9026             if (arg2) {
9027                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9028                     return -TARGET_EFAULT;
9029                 tbuf.actime = tswapal(target_tbuf->actime);
9030                 tbuf.modtime = tswapal(target_tbuf->modtime);
9031                 unlock_user_struct(target_tbuf, arg2, 0);
9032                 host_tbuf = &tbuf;
9033             } else {
9034                 host_tbuf = NULL;
9035             }
9036             if (!(p = lock_user_string(arg1)))
9037                 return -TARGET_EFAULT;
9038             ret = get_errno(utime(p, host_tbuf));
9039             unlock_user(p, arg1, 0);
9040         }
9041         return ret;
9042 #endif
9043 #ifdef TARGET_NR_utimes
9044     case TARGET_NR_utimes:
9045         {
9046             struct timeval *tvp, tv[2];
9047             if (arg2) {
9048                 if (copy_from_user_timeval(&tv[0], arg2)
9049                     || copy_from_user_timeval(&tv[1],
9050                                               arg2 + sizeof(struct target_timeval)))
9051                     return -TARGET_EFAULT;
9052                 tvp = tv;
9053             } else {
9054                 tvp = NULL;
9055             }
9056             if (!(p = lock_user_string(arg1)))
9057                 return -TARGET_EFAULT;
9058             ret = get_errno(utimes(p, tvp));
9059             unlock_user(p, arg1, 0);
9060         }
9061         return ret;
9062 #endif
9063 #if defined(TARGET_NR_futimesat)
9064     case TARGET_NR_futimesat:
9065         {
9066             struct timeval *tvp, tv[2];
9067             if (arg3) {
9068                 if (copy_from_user_timeval(&tv[0], arg3)
9069                     || copy_from_user_timeval(&tv[1],
9070                                               arg3 + sizeof(struct target_timeval)))
9071                     return -TARGET_EFAULT;
9072                 tvp = tv;
9073             } else {
9074                 tvp = NULL;
9075             }
9076             if (!(p = lock_user_string(arg2))) {
9077                 return -TARGET_EFAULT;
9078             }
9079             ret = get_errno(futimesat(arg1, path(p), tvp));
9080             unlock_user(p, arg2, 0);
9081         }
9082         return ret;
9083 #endif
9084 #ifdef TARGET_NR_access
9085     case TARGET_NR_access:
9086         if (!(p = lock_user_string(arg1))) {
9087             return -TARGET_EFAULT;
9088         }
9089         ret = get_errno(access(path(p), arg2));
9090         unlock_user(p, arg1, 0);
9091         return ret;
9092 #endif
9093 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9094     case TARGET_NR_faccessat:
9095         if (!(p = lock_user_string(arg2))) {
9096             return -TARGET_EFAULT;
9097         }
9098         ret = get_errno(faccessat(arg1, p, arg3, 0));
9099         unlock_user(p, arg2, 0);
9100         return ret;
9101 #endif
9102 #ifdef TARGET_NR_nice /* not on alpha */
9103     case TARGET_NR_nice:
9104         return get_errno(nice(arg1));
9105 #endif
9106     case TARGET_NR_sync:
9107         sync();
9108         return 0;
9109 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9110     case TARGET_NR_syncfs:
9111         return get_errno(syncfs(arg1));
9112 #endif
9113     case TARGET_NR_kill:
9114         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9115 #ifdef TARGET_NR_rename
9116     case TARGET_NR_rename:
9117         {
9118             void *p2;
9119             p = lock_user_string(arg1);
9120             p2 = lock_user_string(arg2);
9121             if (!p || !p2)
9122                 ret = -TARGET_EFAULT;
9123             else
9124                 ret = get_errno(rename(p, p2));
9125             unlock_user(p2, arg2, 0);
9126             unlock_user(p, arg1, 0);
9127         }
9128         return ret;
9129 #endif
9130 #if defined(TARGET_NR_renameat)
9131     case TARGET_NR_renameat:
9132         {
9133             void *p2;
9134             p  = lock_user_string(arg2);
9135             p2 = lock_user_string(arg4);
9136             if (!p || !p2)
9137                 ret = -TARGET_EFAULT;
9138             else
9139                 ret = get_errno(renameat(arg1, p, arg3, p2));
9140             unlock_user(p2, arg4, 0);
9141             unlock_user(p, arg2, 0);
9142         }
9143         return ret;
9144 #endif
9145 #if defined(TARGET_NR_renameat2)
9146     case TARGET_NR_renameat2:
9147         {
9148             void *p2;
9149             p  = lock_user_string(arg2);
9150             p2 = lock_user_string(arg4);
9151             if (!p || !p2) {
9152                 ret = -TARGET_EFAULT;
9153             } else {
9154                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9155             }
9156             unlock_user(p2, arg4, 0);
9157             unlock_user(p, arg2, 0);
9158         }
9159         return ret;
9160 #endif
9161 #ifdef TARGET_NR_mkdir
9162     case TARGET_NR_mkdir:
9163         if (!(p = lock_user_string(arg1)))
9164             return -TARGET_EFAULT;
9165         ret = get_errno(mkdir(p, arg2));
9166         unlock_user(p, arg1, 0);
9167         return ret;
9168 #endif
9169 #if defined(TARGET_NR_mkdirat)
9170     case TARGET_NR_mkdirat:
9171         if (!(p = lock_user_string(arg2)))
9172             return -TARGET_EFAULT;
9173         ret = get_errno(mkdirat(arg1, p, arg3));
9174         unlock_user(p, arg2, 0);
9175         return ret;
9176 #endif
9177 #ifdef TARGET_NR_rmdir
9178     case TARGET_NR_rmdir:
9179         if (!(p = lock_user_string(arg1)))
9180             return -TARGET_EFAULT;
9181         ret = get_errno(rmdir(p));
9182         unlock_user(p, arg1, 0);
9183         return ret;
9184 #endif
9185     case TARGET_NR_dup:
9186         ret = get_errno(dup(arg1));
9187         if (ret >= 0) {
9188             fd_trans_dup(arg1, ret);
9189         }
9190         return ret;
9191 #ifdef TARGET_NR_pipe
9192     case TARGET_NR_pipe:
9193         return do_pipe(cpu_env, arg1, 0, 0);
9194 #endif
9195 #ifdef TARGET_NR_pipe2
9196     case TARGET_NR_pipe2:
9197         return do_pipe(cpu_env, arg1,
9198                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9199 #endif
9200     case TARGET_NR_times:
9201         {
9202             struct target_tms *tmsp;
9203             struct tms tms;
9204             ret = get_errno(times(&tms));
9205             if (arg1) {
9206                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9207                 if (!tmsp)
9208                     return -TARGET_EFAULT;
9209                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9210                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9211                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9212                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9213             }
9214             if (!is_error(ret))
9215                 ret = host_to_target_clock_t(ret);
9216         }
9217         return ret;
9218     case TARGET_NR_acct:
9219         if (arg1 == 0) {
9220             ret = get_errno(acct(NULL));
9221         } else {
9222             if (!(p = lock_user_string(arg1))) {
9223                 return -TARGET_EFAULT;
9224             }
9225             ret = get_errno(acct(path(p)));
9226             unlock_user(p, arg1, 0);
9227         }
9228         return ret;
9229 #ifdef TARGET_NR_umount2
9230     case TARGET_NR_umount2:
9231         if (!(p = lock_user_string(arg1)))
9232             return -TARGET_EFAULT;
9233         ret = get_errno(umount2(p, arg2));
9234         unlock_user(p, arg1, 0);
9235         return ret;
9236 #endif
9237     case TARGET_NR_ioctl:
9238         return do_ioctl(arg1, arg2, arg3);
9239 #ifdef TARGET_NR_fcntl
9240     case TARGET_NR_fcntl:
9241         return do_fcntl(arg1, arg2, arg3);
9242 #endif
9243     case TARGET_NR_setpgid:
9244         return get_errno(setpgid(arg1, arg2));
9245     case TARGET_NR_umask:
9246         return get_errno(umask(arg1));
9247     case TARGET_NR_chroot:
9248         if (!(p = lock_user_string(arg1)))
9249             return -TARGET_EFAULT;
9250         ret = get_errno(chroot(p));
9251         unlock_user(p, arg1, 0);
9252         return ret;
9253 #ifdef TARGET_NR_dup2
9254     case TARGET_NR_dup2:
9255         ret = get_errno(dup2(arg1, arg2));
9256         if (ret >= 0) {
9257             fd_trans_dup(arg1, arg2);
9258         }
9259         return ret;
9260 #endif
9261 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9262     case TARGET_NR_dup3:
9263     {
9264         int host_flags;
9265 
9266         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9267             return -EINVAL;
9268         }
9269         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9270         ret = get_errno(dup3(arg1, arg2, host_flags));
9271         if (ret >= 0) {
9272             fd_trans_dup(arg1, arg2);
9273         }
9274         return ret;
9275     }
9276 #endif
9277 #ifdef TARGET_NR_getppid /* not on alpha */
9278     case TARGET_NR_getppid:
9279         return get_errno(getppid());
9280 #endif
9281 #ifdef TARGET_NR_getpgrp
9282     case TARGET_NR_getpgrp:
9283         return get_errno(getpgrp());
9284 #endif
9285     case TARGET_NR_setsid:
9286         return get_errno(setsid());
9287 #ifdef TARGET_NR_sigaction
9288     case TARGET_NR_sigaction:
9289         {
9290 #if defined(TARGET_MIPS)
9291 	    struct target_sigaction act, oact, *pact, *old_act;
9292 
9293 	    if (arg2) {
9294                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9295                     return -TARGET_EFAULT;
9296 		act._sa_handler = old_act->_sa_handler;
9297 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9298 		act.sa_flags = old_act->sa_flags;
9299 		unlock_user_struct(old_act, arg2, 0);
9300 		pact = &act;
9301 	    } else {
9302 		pact = NULL;
9303 	    }
9304 
9305         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9306 
9307 	    if (!is_error(ret) && arg3) {
9308                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9309                     return -TARGET_EFAULT;
9310 		old_act->_sa_handler = oact._sa_handler;
9311 		old_act->sa_flags = oact.sa_flags;
9312 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9313 		old_act->sa_mask.sig[1] = 0;
9314 		old_act->sa_mask.sig[2] = 0;
9315 		old_act->sa_mask.sig[3] = 0;
9316 		unlock_user_struct(old_act, arg3, 1);
9317 	    }
9318 #else
9319             struct target_old_sigaction *old_act;
9320             struct target_sigaction act, oact, *pact;
9321             if (arg2) {
9322                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9323                     return -TARGET_EFAULT;
9324                 act._sa_handler = old_act->_sa_handler;
9325                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9326                 act.sa_flags = old_act->sa_flags;
9327 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9328                 act.sa_restorer = old_act->sa_restorer;
9329 #endif
9330                 unlock_user_struct(old_act, arg2, 0);
9331                 pact = &act;
9332             } else {
9333                 pact = NULL;
9334             }
9335             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9336             if (!is_error(ret) && arg3) {
9337                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9338                     return -TARGET_EFAULT;
9339                 old_act->_sa_handler = oact._sa_handler;
9340                 old_act->sa_mask = oact.sa_mask.sig[0];
9341                 old_act->sa_flags = oact.sa_flags;
9342 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9343                 old_act->sa_restorer = oact.sa_restorer;
9344 #endif
9345                 unlock_user_struct(old_act, arg3, 1);
9346             }
9347 #endif
9348         }
9349         return ret;
9350 #endif
9351     case TARGET_NR_rt_sigaction:
9352         {
9353             /*
9354              * For Alpha and SPARC this is a 5 argument syscall, with
9355              * a 'restorer' parameter which must be copied into the
9356              * sa_restorer field of the sigaction struct.
9357              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9358              * and arg5 is the sigsetsize.
9359              */
9360 #if defined(TARGET_ALPHA)
9361             target_ulong sigsetsize = arg4;
9362             target_ulong restorer = arg5;
9363 #elif defined(TARGET_SPARC)
9364             target_ulong restorer = arg4;
9365             target_ulong sigsetsize = arg5;
9366 #else
9367             target_ulong sigsetsize = arg4;
9368             target_ulong restorer = 0;
9369 #endif
9370             struct target_sigaction *act = NULL;
9371             struct target_sigaction *oact = NULL;
9372 
9373             if (sigsetsize != sizeof(target_sigset_t)) {
9374                 return -TARGET_EINVAL;
9375             }
9376             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9377                 return -TARGET_EFAULT;
9378             }
9379             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9380                 ret = -TARGET_EFAULT;
9381             } else {
9382                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9383                 if (oact) {
9384                     unlock_user_struct(oact, arg3, 1);
9385                 }
9386             }
9387             if (act) {
9388                 unlock_user_struct(act, arg2, 0);
9389             }
9390         }
9391         return ret;
9392 #ifdef TARGET_NR_sgetmask /* not on alpha */
9393     case TARGET_NR_sgetmask:
9394         {
9395             sigset_t cur_set;
9396             abi_ulong target_set;
9397             ret = do_sigprocmask(0, NULL, &cur_set);
9398             if (!ret) {
9399                 host_to_target_old_sigset(&target_set, &cur_set);
9400                 ret = target_set;
9401             }
9402         }
9403         return ret;
9404 #endif
9405 #ifdef TARGET_NR_ssetmask /* not on alpha */
9406     case TARGET_NR_ssetmask:
9407         {
9408             sigset_t set, oset;
9409             abi_ulong target_set = arg1;
9410             target_to_host_old_sigset(&set, &target_set);
9411             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9412             if (!ret) {
9413                 host_to_target_old_sigset(&target_set, &oset);
9414                 ret = target_set;
9415             }
9416         }
9417         return ret;
9418 #endif
9419 #ifdef TARGET_NR_sigprocmask
9420     case TARGET_NR_sigprocmask:
9421         {
9422 #if defined(TARGET_ALPHA)
9423             sigset_t set, oldset;
9424             abi_ulong mask;
9425             int how;
9426 
9427             switch (arg1) {
9428             case TARGET_SIG_BLOCK:
9429                 how = SIG_BLOCK;
9430                 break;
9431             case TARGET_SIG_UNBLOCK:
9432                 how = SIG_UNBLOCK;
9433                 break;
9434             case TARGET_SIG_SETMASK:
9435                 how = SIG_SETMASK;
9436                 break;
9437             default:
9438                 return -TARGET_EINVAL;
9439             }
9440             mask = arg2;
9441             target_to_host_old_sigset(&set, &mask);
9442 
9443             ret = do_sigprocmask(how, &set, &oldset);
9444             if (!is_error(ret)) {
9445                 host_to_target_old_sigset(&mask, &oldset);
9446                 ret = mask;
9447                 cpu_env->ir[IR_V0] = 0; /* force no error */
9448             }
9449 #else
9450             sigset_t set, oldset, *set_ptr;
9451             int how;
9452 
9453             if (arg2) {
9454                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9455                 if (!p) {
9456                     return -TARGET_EFAULT;
9457                 }
9458                 target_to_host_old_sigset(&set, p);
9459                 unlock_user(p, arg2, 0);
9460                 set_ptr = &set;
9461                 switch (arg1) {
9462                 case TARGET_SIG_BLOCK:
9463                     how = SIG_BLOCK;
9464                     break;
9465                 case TARGET_SIG_UNBLOCK:
9466                     how = SIG_UNBLOCK;
9467                     break;
9468                 case TARGET_SIG_SETMASK:
9469                     how = SIG_SETMASK;
9470                     break;
9471                 default:
9472                     return -TARGET_EINVAL;
9473                 }
9474             } else {
9475                 how = 0;
9476                 set_ptr = NULL;
9477             }
9478             ret = do_sigprocmask(how, set_ptr, &oldset);
9479             if (!is_error(ret) && arg3) {
9480                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9481                     return -TARGET_EFAULT;
9482                 host_to_target_old_sigset(p, &oldset);
9483                 unlock_user(p, arg3, sizeof(target_sigset_t));
9484             }
9485 #endif
9486         }
9487         return ret;
9488 #endif
9489     case TARGET_NR_rt_sigprocmask:
9490         {
9491             int how = arg1;
9492             sigset_t set, oldset, *set_ptr;
9493 
9494             if (arg4 != sizeof(target_sigset_t)) {
9495                 return -TARGET_EINVAL;
9496             }
9497 
9498             if (arg2) {
9499                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9500                 if (!p) {
9501                     return -TARGET_EFAULT;
9502                 }
9503                 target_to_host_sigset(&set, p);
9504                 unlock_user(p, arg2, 0);
9505                 set_ptr = &set;
9506                 switch(how) {
9507                 case TARGET_SIG_BLOCK:
9508                     how = SIG_BLOCK;
9509                     break;
9510                 case TARGET_SIG_UNBLOCK:
9511                     how = SIG_UNBLOCK;
9512                     break;
9513                 case TARGET_SIG_SETMASK:
9514                     how = SIG_SETMASK;
9515                     break;
9516                 default:
9517                     return -TARGET_EINVAL;
9518                 }
9519             } else {
9520                 how = 0;
9521                 set_ptr = NULL;
9522             }
9523             ret = do_sigprocmask(how, set_ptr, &oldset);
9524             if (!is_error(ret) && arg3) {
9525                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9526                     return -TARGET_EFAULT;
9527                 host_to_target_sigset(p, &oldset);
9528                 unlock_user(p, arg3, sizeof(target_sigset_t));
9529             }
9530         }
9531         return ret;
9532 #ifdef TARGET_NR_sigpending
9533     case TARGET_NR_sigpending:
9534         {
9535             sigset_t set;
9536             ret = get_errno(sigpending(&set));
9537             if (!is_error(ret)) {
9538                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9539                     return -TARGET_EFAULT;
9540                 host_to_target_old_sigset(p, &set);
9541                 unlock_user(p, arg1, sizeof(target_sigset_t));
9542             }
9543         }
9544         return ret;
9545 #endif
9546     case TARGET_NR_rt_sigpending:
9547         {
9548             sigset_t set;
9549 
9550             /* Yes, this check is >, not != like most. We follow the kernel's
9551              * logic and it does it like this because it implements
9552              * NR_sigpending through the same code path, and in that case
9553              * the old_sigset_t is smaller in size.
9554              */
9555             if (arg2 > sizeof(target_sigset_t)) {
9556                 return -TARGET_EINVAL;
9557             }
9558 
9559             ret = get_errno(sigpending(&set));
9560             if (!is_error(ret)) {
9561                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9562                     return -TARGET_EFAULT;
9563                 host_to_target_sigset(p, &set);
9564                 unlock_user(p, arg1, sizeof(target_sigset_t));
9565             }
9566         }
9567         return ret;
9568 #ifdef TARGET_NR_sigsuspend
9569     case TARGET_NR_sigsuspend:
9570         {
9571             sigset_t *set;
9572 
9573 #if defined(TARGET_ALPHA)
9574             TaskState *ts = cpu->opaque;
9575             /* target_to_host_old_sigset will bswap back */
9576             abi_ulong mask = tswapal(arg1);
9577             set = &ts->sigsuspend_mask;
9578             target_to_host_old_sigset(set, &mask);
9579 #else
9580             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9581             if (ret != 0) {
9582                 return ret;
9583             }
9584 #endif
9585             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9586             finish_sigsuspend_mask(ret);
9587         }
9588         return ret;
9589 #endif
9590     case TARGET_NR_rt_sigsuspend:
9591         {
9592             sigset_t *set;
9593 
9594             ret = process_sigsuspend_mask(&set, arg1, arg2);
9595             if (ret != 0) {
9596                 return ret;
9597             }
9598             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9599             finish_sigsuspend_mask(ret);
9600         }
9601         return ret;
9602 #ifdef TARGET_NR_rt_sigtimedwait
9603     case TARGET_NR_rt_sigtimedwait:
9604         {
9605             sigset_t set;
9606             struct timespec uts, *puts;
9607             siginfo_t uinfo;
9608 
9609             if (arg4 != sizeof(target_sigset_t)) {
9610                 return -TARGET_EINVAL;
9611             }
9612 
9613             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9614                 return -TARGET_EFAULT;
9615             target_to_host_sigset(&set, p);
9616             unlock_user(p, arg1, 0);
9617             if (arg3) {
9618                 puts = &uts;
9619                 if (target_to_host_timespec(puts, arg3)) {
9620                     return -TARGET_EFAULT;
9621                 }
9622             } else {
9623                 puts = NULL;
9624             }
9625             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9626                                                  SIGSET_T_SIZE));
9627             if (!is_error(ret)) {
9628                 if (arg2) {
9629                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9630                                   0);
9631                     if (!p) {
9632                         return -TARGET_EFAULT;
9633                     }
9634                     host_to_target_siginfo(p, &uinfo);
9635                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9636                 }
9637                 ret = host_to_target_signal(ret);
9638             }
9639         }
9640         return ret;
9641 #endif
9642 #ifdef TARGET_NR_rt_sigtimedwait_time64
9643     case TARGET_NR_rt_sigtimedwait_time64:
9644         {
9645             sigset_t set;
9646             struct timespec uts, *puts;
9647             siginfo_t uinfo;
9648 
9649             if (arg4 != sizeof(target_sigset_t)) {
9650                 return -TARGET_EINVAL;
9651             }
9652 
9653             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9654             if (!p) {
9655                 return -TARGET_EFAULT;
9656             }
9657             target_to_host_sigset(&set, p);
9658             unlock_user(p, arg1, 0);
9659             if (arg3) {
9660                 puts = &uts;
9661                 if (target_to_host_timespec64(puts, arg3)) {
9662                     return -TARGET_EFAULT;
9663                 }
9664             } else {
9665                 puts = NULL;
9666             }
9667             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9668                                                  SIGSET_T_SIZE));
9669             if (!is_error(ret)) {
9670                 if (arg2) {
9671                     p = lock_user(VERIFY_WRITE, arg2,
9672                                   sizeof(target_siginfo_t), 0);
9673                     if (!p) {
9674                         return -TARGET_EFAULT;
9675                     }
9676                     host_to_target_siginfo(p, &uinfo);
9677                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9678                 }
9679                 ret = host_to_target_signal(ret);
9680             }
9681         }
9682         return ret;
9683 #endif
9684     case TARGET_NR_rt_sigqueueinfo:
9685         {
9686             siginfo_t uinfo;
9687 
9688             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9689             if (!p) {
9690                 return -TARGET_EFAULT;
9691             }
9692             target_to_host_siginfo(&uinfo, p);
9693             unlock_user(p, arg3, 0);
9694             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9695         }
9696         return ret;
9697     case TARGET_NR_rt_tgsigqueueinfo:
9698         {
9699             siginfo_t uinfo;
9700 
9701             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9702             if (!p) {
9703                 return -TARGET_EFAULT;
9704             }
9705             target_to_host_siginfo(&uinfo, p);
9706             unlock_user(p, arg4, 0);
9707             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9708         }
9709         return ret;
9710 #ifdef TARGET_NR_sigreturn
9711     case TARGET_NR_sigreturn:
9712         if (block_signals()) {
9713             return -QEMU_ERESTARTSYS;
9714         }
9715         return do_sigreturn(cpu_env);
9716 #endif
9717     case TARGET_NR_rt_sigreturn:
9718         if (block_signals()) {
9719             return -QEMU_ERESTARTSYS;
9720         }
9721         return do_rt_sigreturn(cpu_env);
9722     case TARGET_NR_sethostname:
9723         if (!(p = lock_user_string(arg1)))
9724             return -TARGET_EFAULT;
9725         ret = get_errno(sethostname(p, arg2));
9726         unlock_user(p, arg1, 0);
9727         return ret;
9728 #ifdef TARGET_NR_setrlimit
9729     case TARGET_NR_setrlimit:
9730         {
9731             int resource = target_to_host_resource(arg1);
9732             struct target_rlimit *target_rlim;
9733             struct rlimit rlim;
9734             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9735                 return -TARGET_EFAULT;
9736             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9737             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9738             unlock_user_struct(target_rlim, arg2, 0);
9739             /*
9740              * If we just passed through resource limit settings for memory then
9741              * they would also apply to QEMU's own allocations, and QEMU will
9742              * crash or hang or die if its allocations fail. Ideally we would
9743              * track the guest allocations in QEMU and apply the limits ourselves.
9744              * For now, just tell the guest the call succeeded but don't actually
9745              * limit anything.
9746              */
9747             if (resource != RLIMIT_AS &&
9748                 resource != RLIMIT_DATA &&
9749                 resource != RLIMIT_STACK) {
9750                 return get_errno(setrlimit(resource, &rlim));
9751             } else {
9752                 return 0;
9753             }
9754         }
9755 #endif
9756 #ifdef TARGET_NR_getrlimit
9757     case TARGET_NR_getrlimit:
9758         {
9759             int resource = target_to_host_resource(arg1);
9760             struct target_rlimit *target_rlim;
9761             struct rlimit rlim;
9762 
9763             ret = get_errno(getrlimit(resource, &rlim));
9764             if (!is_error(ret)) {
9765                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9766                     return -TARGET_EFAULT;
9767                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9768                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9769                 unlock_user_struct(target_rlim, arg2, 1);
9770             }
9771         }
9772         return ret;
9773 #endif
9774     case TARGET_NR_getrusage:
9775         {
9776             struct rusage rusage;
9777             ret = get_errno(getrusage(arg1, &rusage));
9778             if (!is_error(ret)) {
9779                 ret = host_to_target_rusage(arg2, &rusage);
9780             }
9781         }
9782         return ret;
9783 #if defined(TARGET_NR_gettimeofday)
9784     case TARGET_NR_gettimeofday:
9785         {
9786             struct timeval tv;
9787             struct timezone tz;
9788 
9789             ret = get_errno(gettimeofday(&tv, &tz));
9790             if (!is_error(ret)) {
9791                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9792                     return -TARGET_EFAULT;
9793                 }
9794                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9795                     return -TARGET_EFAULT;
9796                 }
9797             }
9798         }
9799         return ret;
9800 #endif
9801 #if defined(TARGET_NR_settimeofday)
9802     case TARGET_NR_settimeofday:
9803         {
9804             struct timeval tv, *ptv = NULL;
9805             struct timezone tz, *ptz = NULL;
9806 
9807             if (arg1) {
9808                 if (copy_from_user_timeval(&tv, arg1)) {
9809                     return -TARGET_EFAULT;
9810                 }
9811                 ptv = &tv;
9812             }
9813 
9814             if (arg2) {
9815                 if (copy_from_user_timezone(&tz, arg2)) {
9816                     return -TARGET_EFAULT;
9817                 }
9818                 ptz = &tz;
9819             }
9820 
9821             return get_errno(settimeofday(ptv, ptz));
9822         }
9823 #endif
9824 #if defined(TARGET_NR_select)
9825     case TARGET_NR_select:
9826 #if defined(TARGET_WANT_NI_OLD_SELECT)
9827         /* some architectures used to have old_select here
9828          * but now ENOSYS it.
9829          */
9830         ret = -TARGET_ENOSYS;
9831 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9832         ret = do_old_select(arg1);
9833 #else
9834         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9835 #endif
9836         return ret;
9837 #endif
9838 #ifdef TARGET_NR_pselect6
9839     case TARGET_NR_pselect6:
9840         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9841 #endif
9842 #ifdef TARGET_NR_pselect6_time64
9843     case TARGET_NR_pselect6_time64:
9844         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9845 #endif
9846 #ifdef TARGET_NR_symlink
9847     case TARGET_NR_symlink:
9848         {
9849             void *p2;
9850             p = lock_user_string(arg1);
9851             p2 = lock_user_string(arg2);
9852             if (!p || !p2)
9853                 ret = -TARGET_EFAULT;
9854             else
9855                 ret = get_errno(symlink(p, p2));
9856             unlock_user(p2, arg2, 0);
9857             unlock_user(p, arg1, 0);
9858         }
9859         return ret;
9860 #endif
9861 #if defined(TARGET_NR_symlinkat)
9862     case TARGET_NR_symlinkat:
9863         {
9864             void *p2;
9865             p  = lock_user_string(arg1);
9866             p2 = lock_user_string(arg3);
9867             if (!p || !p2)
9868                 ret = -TARGET_EFAULT;
9869             else
9870                 ret = get_errno(symlinkat(p, arg2, p2));
9871             unlock_user(p2, arg3, 0);
9872             unlock_user(p, arg1, 0);
9873         }
9874         return ret;
9875 #endif
9876 #ifdef TARGET_NR_readlink
9877     case TARGET_NR_readlink:
9878         {
9879             void *p2;
9880             p = lock_user_string(arg1);
9881             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9882             if (!p || !p2) {
9883                 ret = -TARGET_EFAULT;
9884             } else if (!arg3) {
9885                 /* Short circuit this for the magic exe check. */
9886                 ret = -TARGET_EINVAL;
9887             } else if (is_proc_myself((const char *)p, "exe")) {
9888                 char real[PATH_MAX], *temp;
9889                 temp = realpath(exec_path, real);
9890                 /* Return value is # of bytes that we wrote to the buffer. */
9891                 if (temp == NULL) {
9892                     ret = get_errno(-1);
9893                 } else {
9894                     /* Don't worry about sign mismatch as earlier mapping
9895                      * logic would have thrown a bad address error. */
9896                     ret = MIN(strlen(real), arg3);
9897                     /* We cannot NUL terminate the string. */
9898                     memcpy(p2, real, ret);
9899                 }
9900             } else {
9901                 ret = get_errno(readlink(path(p), p2, arg3));
9902             }
9903             unlock_user(p2, arg2, ret);
9904             unlock_user(p, arg1, 0);
9905         }
9906         return ret;
9907 #endif
9908 #if defined(TARGET_NR_readlinkat)
9909     case TARGET_NR_readlinkat:
9910         {
9911             void *p2;
9912             p  = lock_user_string(arg2);
9913             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9914             if (!p || !p2) {
9915                 ret = -TARGET_EFAULT;
9916             } else if (is_proc_myself((const char *)p, "exe")) {
9917                 char real[PATH_MAX], *temp;
9918                 temp = realpath(exec_path, real);
9919                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9920                 snprintf((char *)p2, arg4, "%s", real);
9921             } else {
9922                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9923             }
9924             unlock_user(p2, arg3, ret);
9925             unlock_user(p, arg2, 0);
9926         }
9927         return ret;
9928 #endif
9929 #ifdef TARGET_NR_swapon
9930     case TARGET_NR_swapon:
9931         if (!(p = lock_user_string(arg1)))
9932             return -TARGET_EFAULT;
9933         ret = get_errno(swapon(p, arg2));
9934         unlock_user(p, arg1, 0);
9935         return ret;
9936 #endif
9937     case TARGET_NR_reboot:
9938         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9939            /* arg4 must be ignored in all other cases */
9940            p = lock_user_string(arg4);
9941            if (!p) {
9942                return -TARGET_EFAULT;
9943            }
9944            ret = get_errno(reboot(arg1, arg2, arg3, p));
9945            unlock_user(p, arg4, 0);
9946         } else {
9947            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9948         }
9949         return ret;
9950 #ifdef TARGET_NR_mmap
9951     case TARGET_NR_mmap:
9952 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9953     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9954     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9955     || defined(TARGET_S390X)
9956         {
9957             abi_ulong *v;
9958             abi_ulong v1, v2, v3, v4, v5, v6;
9959             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9960                 return -TARGET_EFAULT;
9961             v1 = tswapal(v[0]);
9962             v2 = tswapal(v[1]);
9963             v3 = tswapal(v[2]);
9964             v4 = tswapal(v[3]);
9965             v5 = tswapal(v[4]);
9966             v6 = tswapal(v[5]);
9967             unlock_user(v, arg1, 0);
9968             ret = get_errno(target_mmap(v1, v2, v3,
9969                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9970                                         v5, v6));
9971         }
9972 #else
9973         /* mmap pointers are always untagged */
9974         ret = get_errno(target_mmap(arg1, arg2, arg3,
9975                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9976                                     arg5,
9977                                     arg6));
9978 #endif
9979         return ret;
9980 #endif
9981 #ifdef TARGET_NR_mmap2
9982     case TARGET_NR_mmap2:
9983 #ifndef MMAP_SHIFT
9984 #define MMAP_SHIFT 12
9985 #endif
9986         ret = target_mmap(arg1, arg2, arg3,
9987                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9988                           arg5, arg6 << MMAP_SHIFT);
9989         return get_errno(ret);
9990 #endif
9991     case TARGET_NR_munmap:
9992         arg1 = cpu_untagged_addr(cpu, arg1);
9993         return get_errno(target_munmap(arg1, arg2));
9994     case TARGET_NR_mprotect:
9995         arg1 = cpu_untagged_addr(cpu, arg1);
9996         {
9997             TaskState *ts = cpu->opaque;
9998             /* Special hack to detect libc making the stack executable.  */
9999             if ((arg3 & PROT_GROWSDOWN)
10000                 && arg1 >= ts->info->stack_limit
10001                 && arg1 <= ts->info->start_stack) {
10002                 arg3 &= ~PROT_GROWSDOWN;
10003                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10004                 arg1 = ts->info->stack_limit;
10005             }
10006         }
10007         return get_errno(target_mprotect(arg1, arg2, arg3));
10008 #ifdef TARGET_NR_mremap
10009     case TARGET_NR_mremap:
10010         arg1 = cpu_untagged_addr(cpu, arg1);
10011         /* mremap new_addr (arg5) is always untagged */
10012         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10013 #endif
10014         /* ??? msync/mlock/munlock are broken for softmmu.  */
10015 #ifdef TARGET_NR_msync
10016     case TARGET_NR_msync:
10017         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10018 #endif
10019 #ifdef TARGET_NR_mlock
10020     case TARGET_NR_mlock:
10021         return get_errno(mlock(g2h(cpu, arg1), arg2));
10022 #endif
10023 #ifdef TARGET_NR_munlock
10024     case TARGET_NR_munlock:
10025         return get_errno(munlock(g2h(cpu, arg1), arg2));
10026 #endif
10027 #ifdef TARGET_NR_mlockall
10028     case TARGET_NR_mlockall:
10029         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10030 #endif
10031 #ifdef TARGET_NR_munlockall
10032     case TARGET_NR_munlockall:
10033         return get_errno(munlockall());
10034 #endif
10035 #ifdef TARGET_NR_truncate
10036     case TARGET_NR_truncate:
10037         if (!(p = lock_user_string(arg1)))
10038             return -TARGET_EFAULT;
10039         ret = get_errno(truncate(p, arg2));
10040         unlock_user(p, arg1, 0);
10041         return ret;
10042 #endif
10043 #ifdef TARGET_NR_ftruncate
10044     case TARGET_NR_ftruncate:
10045         return get_errno(ftruncate(arg1, arg2));
10046 #endif
10047     case TARGET_NR_fchmod:
10048         return get_errno(fchmod(arg1, arg2));
10049 #if defined(TARGET_NR_fchmodat)
10050     case TARGET_NR_fchmodat:
10051         if (!(p = lock_user_string(arg2)))
10052             return -TARGET_EFAULT;
10053         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10054         unlock_user(p, arg2, 0);
10055         return ret;
10056 #endif
10057     case TARGET_NR_getpriority:
10058         /* Note that negative values are valid for getpriority, so we must
10059            differentiate based on errno settings.  */
10060         errno = 0;
10061         ret = getpriority(arg1, arg2);
10062         if (ret == -1 && errno != 0) {
10063             return -host_to_target_errno(errno);
10064         }
10065 #ifdef TARGET_ALPHA
10066         /* Return value is the unbiased priority.  Signal no error.  */
10067         cpu_env->ir[IR_V0] = 0;
10068 #else
10069         /* Return value is a biased priority to avoid negative numbers.  */
10070         ret = 20 - ret;
10071 #endif
10072         return ret;
10073     case TARGET_NR_setpriority:
10074         return get_errno(setpriority(arg1, arg2, arg3));
10075 #ifdef TARGET_NR_statfs
10076     case TARGET_NR_statfs:
10077         if (!(p = lock_user_string(arg1))) {
10078             return -TARGET_EFAULT;
10079         }
10080         ret = get_errno(statfs(path(p), &stfs));
10081         unlock_user(p, arg1, 0);
10082     convert_statfs:
10083         if (!is_error(ret)) {
10084             struct target_statfs *target_stfs;
10085 
10086             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10087                 return -TARGET_EFAULT;
10088             __put_user(stfs.f_type, &target_stfs->f_type);
10089             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10090             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10091             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10092             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10093             __put_user(stfs.f_files, &target_stfs->f_files);
10094             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10095             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10096             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10097             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10098             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10099 #ifdef _STATFS_F_FLAGS
10100             __put_user(stfs.f_flags, &target_stfs->f_flags);
10101 #else
10102             __put_user(0, &target_stfs->f_flags);
10103 #endif
10104             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10105             unlock_user_struct(target_stfs, arg2, 1);
10106         }
10107         return ret;
10108 #endif
10109 #ifdef TARGET_NR_fstatfs
10110     case TARGET_NR_fstatfs:
10111         ret = get_errno(fstatfs(arg1, &stfs));
10112         goto convert_statfs;
10113 #endif
10114 #ifdef TARGET_NR_statfs64
10115     case TARGET_NR_statfs64:
10116         if (!(p = lock_user_string(arg1))) {
10117             return -TARGET_EFAULT;
10118         }
10119         ret = get_errno(statfs(path(p), &stfs));
10120         unlock_user(p, arg1, 0);
10121     convert_statfs64:
10122         if (!is_error(ret)) {
10123             struct target_statfs64 *target_stfs;
10124 
10125             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10126                 return -TARGET_EFAULT;
10127             __put_user(stfs.f_type, &target_stfs->f_type);
10128             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10129             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10130             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10131             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10132             __put_user(stfs.f_files, &target_stfs->f_files);
10133             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10134             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10135             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10136             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10137             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10138 #ifdef _STATFS_F_FLAGS
10139             __put_user(stfs.f_flags, &target_stfs->f_flags);
10140 #else
10141             __put_user(0, &target_stfs->f_flags);
10142 #endif
10143             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10144             unlock_user_struct(target_stfs, arg3, 1);
10145         }
10146         return ret;
10147     case TARGET_NR_fstatfs64:
10148         ret = get_errno(fstatfs(arg1, &stfs));
10149         goto convert_statfs64;
10150 #endif
10151 #ifdef TARGET_NR_socketcall
10152     case TARGET_NR_socketcall:
10153         return do_socketcall(arg1, arg2);
10154 #endif
10155 #ifdef TARGET_NR_accept
10156     case TARGET_NR_accept:
10157         return do_accept4(arg1, arg2, arg3, 0);
10158 #endif
10159 #ifdef TARGET_NR_accept4
10160     case TARGET_NR_accept4:
10161         return do_accept4(arg1, arg2, arg3, arg4);
10162 #endif
10163 #ifdef TARGET_NR_bind
10164     case TARGET_NR_bind:
10165         return do_bind(arg1, arg2, arg3);
10166 #endif
10167 #ifdef TARGET_NR_connect
10168     case TARGET_NR_connect:
10169         return do_connect(arg1, arg2, arg3);
10170 #endif
10171 #ifdef TARGET_NR_getpeername
10172     case TARGET_NR_getpeername:
10173         return do_getpeername(arg1, arg2, arg3);
10174 #endif
10175 #ifdef TARGET_NR_getsockname
10176     case TARGET_NR_getsockname:
10177         return do_getsockname(arg1, arg2, arg3);
10178 #endif
10179 #ifdef TARGET_NR_getsockopt
10180     case TARGET_NR_getsockopt:
10181         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10182 #endif
10183 #ifdef TARGET_NR_listen
10184     case TARGET_NR_listen:
10185         return get_errno(listen(arg1, arg2));
10186 #endif
10187 #ifdef TARGET_NR_recv
10188     case TARGET_NR_recv:
10189         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10190 #endif
10191 #ifdef TARGET_NR_recvfrom
10192     case TARGET_NR_recvfrom:
10193         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10194 #endif
10195 #ifdef TARGET_NR_recvmsg
10196     case TARGET_NR_recvmsg:
10197         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10198 #endif
10199 #ifdef TARGET_NR_send
10200     case TARGET_NR_send:
10201         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10202 #endif
10203 #ifdef TARGET_NR_sendmsg
10204     case TARGET_NR_sendmsg:
10205         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10206 #endif
10207 #ifdef TARGET_NR_sendmmsg
10208     case TARGET_NR_sendmmsg:
10209         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10210 #endif
10211 #ifdef TARGET_NR_recvmmsg
10212     case TARGET_NR_recvmmsg:
10213         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10214 #endif
10215 #ifdef TARGET_NR_sendto
10216     case TARGET_NR_sendto:
10217         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10218 #endif
10219 #ifdef TARGET_NR_shutdown
10220     case TARGET_NR_shutdown:
10221         return get_errno(shutdown(arg1, arg2));
10222 #endif
10223 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10224     case TARGET_NR_getrandom:
10225         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10226         if (!p) {
10227             return -TARGET_EFAULT;
10228         }
10229         ret = get_errno(getrandom(p, arg2, arg3));
10230         unlock_user(p, arg1, ret);
10231         return ret;
10232 #endif
10233 #ifdef TARGET_NR_socket
10234     case TARGET_NR_socket:
10235         return do_socket(arg1, arg2, arg3);
10236 #endif
10237 #ifdef TARGET_NR_socketpair
10238     case TARGET_NR_socketpair:
10239         return do_socketpair(arg1, arg2, arg3, arg4);
10240 #endif
10241 #ifdef TARGET_NR_setsockopt
10242     case TARGET_NR_setsockopt:
10243         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10244 #endif
10245 #if defined(TARGET_NR_syslog)
10246     case TARGET_NR_syslog:
10247         {
10248             int len = arg2;
10249 
10250             switch (arg1) {
10251             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10252             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10253             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10254             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10255             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10256             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10257             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10258             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10259                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10260             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10261             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10262             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10263                 {
10264                     if (len < 0) {
10265                         return -TARGET_EINVAL;
10266                     }
10267                     if (len == 0) {
10268                         return 0;
10269                     }
10270                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10271                     if (!p) {
10272                         return -TARGET_EFAULT;
10273                     }
10274                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10275                     unlock_user(p, arg2, arg3);
10276                 }
10277                 return ret;
10278             default:
10279                 return -TARGET_EINVAL;
10280             }
10281         }
10282         break;
10283 #endif
10284     case TARGET_NR_setitimer:
10285         {
10286             struct itimerval value, ovalue, *pvalue;
10287 
10288             if (arg2) {
10289                 pvalue = &value;
10290                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10291                     || copy_from_user_timeval(&pvalue->it_value,
10292                                               arg2 + sizeof(struct target_timeval)))
10293                     return -TARGET_EFAULT;
10294             } else {
10295                 pvalue = NULL;
10296             }
10297             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10298             if (!is_error(ret) && arg3) {
10299                 if (copy_to_user_timeval(arg3,
10300                                          &ovalue.it_interval)
10301                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10302                                             &ovalue.it_value))
10303                     return -TARGET_EFAULT;
10304             }
10305         }
10306         return ret;
10307     case TARGET_NR_getitimer:
10308         {
10309             struct itimerval value;
10310 
10311             ret = get_errno(getitimer(arg1, &value));
10312             if (!is_error(ret) && arg2) {
10313                 if (copy_to_user_timeval(arg2,
10314                                          &value.it_interval)
10315                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10316                                             &value.it_value))
10317                     return -TARGET_EFAULT;
10318             }
10319         }
10320         return ret;
10321 #ifdef TARGET_NR_stat
10322     case TARGET_NR_stat:
10323         if (!(p = lock_user_string(arg1))) {
10324             return -TARGET_EFAULT;
10325         }
10326         ret = get_errno(stat(path(p), &st));
10327         unlock_user(p, arg1, 0);
10328         goto do_stat;
10329 #endif
10330 #ifdef TARGET_NR_lstat
10331     case TARGET_NR_lstat:
10332         if (!(p = lock_user_string(arg1))) {
10333             return -TARGET_EFAULT;
10334         }
10335         ret = get_errno(lstat(path(p), &st));
10336         unlock_user(p, arg1, 0);
10337         goto do_stat;
10338 #endif
10339 #ifdef TARGET_NR_fstat
10340     case TARGET_NR_fstat:
10341         {
10342             ret = get_errno(fstat(arg1, &st));
10343 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10344         do_stat:
10345 #endif
10346             if (!is_error(ret)) {
10347                 struct target_stat *target_st;
10348 
10349                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10350                     return -TARGET_EFAULT;
10351                 memset(target_st, 0, sizeof(*target_st));
10352                 __put_user(st.st_dev, &target_st->st_dev);
10353                 __put_user(st.st_ino, &target_st->st_ino);
10354                 __put_user(st.st_mode, &target_st->st_mode);
10355                 __put_user(st.st_uid, &target_st->st_uid);
10356                 __put_user(st.st_gid, &target_st->st_gid);
10357                 __put_user(st.st_nlink, &target_st->st_nlink);
10358                 __put_user(st.st_rdev, &target_st->st_rdev);
10359                 __put_user(st.st_size, &target_st->st_size);
10360                 __put_user(st.st_blksize, &target_st->st_blksize);
10361                 __put_user(st.st_blocks, &target_st->st_blocks);
10362                 __put_user(st.st_atime, &target_st->target_st_atime);
10363                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10364                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10365 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10366                 __put_user(st.st_atim.tv_nsec,
10367                            &target_st->target_st_atime_nsec);
10368                 __put_user(st.st_mtim.tv_nsec,
10369                            &target_st->target_st_mtime_nsec);
10370                 __put_user(st.st_ctim.tv_nsec,
10371                            &target_st->target_st_ctime_nsec);
10372 #endif
10373                 unlock_user_struct(target_st, arg2, 1);
10374             }
10375         }
10376         return ret;
10377 #endif
10378     case TARGET_NR_vhangup:
10379         return get_errno(vhangup());
10380 #ifdef TARGET_NR_syscall
10381     case TARGET_NR_syscall:
10382         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10383                           arg6, arg7, arg8, 0);
10384 #endif
10385 #if defined(TARGET_NR_wait4)
10386     case TARGET_NR_wait4:
10387         {
10388             int status;
10389             abi_long status_ptr = arg2;
10390             struct rusage rusage, *rusage_ptr;
10391             abi_ulong target_rusage = arg4;
10392             abi_long rusage_err;
10393             if (target_rusage)
10394                 rusage_ptr = &rusage;
10395             else
10396                 rusage_ptr = NULL;
10397             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10398             if (!is_error(ret)) {
10399                 if (status_ptr && ret) {
10400                     status = host_to_target_waitstatus(status);
10401                     if (put_user_s32(status, status_ptr))
10402                         return -TARGET_EFAULT;
10403                 }
10404                 if (target_rusage) {
10405                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10406                     if (rusage_err) {
10407                         ret = rusage_err;
10408                     }
10409                 }
10410             }
10411         }
10412         return ret;
10413 #endif
10414 #ifdef TARGET_NR_swapoff
10415     case TARGET_NR_swapoff:
10416         if (!(p = lock_user_string(arg1)))
10417             return -TARGET_EFAULT;
10418         ret = get_errno(swapoff(p));
10419         unlock_user(p, arg1, 0);
10420         return ret;
10421 #endif
10422     case TARGET_NR_sysinfo:
10423         {
10424             struct target_sysinfo *target_value;
10425             struct sysinfo value;
10426             ret = get_errno(sysinfo(&value));
10427             if (!is_error(ret) && arg1)
10428             {
10429                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10430                     return -TARGET_EFAULT;
10431                 __put_user(value.uptime, &target_value->uptime);
10432                 __put_user(value.loads[0], &target_value->loads[0]);
10433                 __put_user(value.loads[1], &target_value->loads[1]);
10434                 __put_user(value.loads[2], &target_value->loads[2]);
10435                 __put_user(value.totalram, &target_value->totalram);
10436                 __put_user(value.freeram, &target_value->freeram);
10437                 __put_user(value.sharedram, &target_value->sharedram);
10438                 __put_user(value.bufferram, &target_value->bufferram);
10439                 __put_user(value.totalswap, &target_value->totalswap);
10440                 __put_user(value.freeswap, &target_value->freeswap);
10441                 __put_user(value.procs, &target_value->procs);
10442                 __put_user(value.totalhigh, &target_value->totalhigh);
10443                 __put_user(value.freehigh, &target_value->freehigh);
10444                 __put_user(value.mem_unit, &target_value->mem_unit);
10445                 unlock_user_struct(target_value, arg1, 1);
10446             }
10447         }
10448         return ret;
10449 #ifdef TARGET_NR_ipc
10450     case TARGET_NR_ipc:
10451         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10452 #endif
10453 #ifdef TARGET_NR_semget
10454     case TARGET_NR_semget:
10455         return get_errno(semget(arg1, arg2, arg3));
10456 #endif
10457 #ifdef TARGET_NR_semop
10458     case TARGET_NR_semop:
10459         return do_semtimedop(arg1, arg2, arg3, 0, false);
10460 #endif
10461 #ifdef TARGET_NR_semtimedop
10462     case TARGET_NR_semtimedop:
10463         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10464 #endif
10465 #ifdef TARGET_NR_semtimedop_time64
10466     case TARGET_NR_semtimedop_time64:
10467         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10468 #endif
10469 #ifdef TARGET_NR_semctl
10470     case TARGET_NR_semctl:
10471         return do_semctl(arg1, arg2, arg3, arg4);
10472 #endif
10473 #ifdef TARGET_NR_msgctl
10474     case TARGET_NR_msgctl:
10475         return do_msgctl(arg1, arg2, arg3);
10476 #endif
10477 #ifdef TARGET_NR_msgget
10478     case TARGET_NR_msgget:
10479         return get_errno(msgget(arg1, arg2));
10480 #endif
10481 #ifdef TARGET_NR_msgrcv
10482     case TARGET_NR_msgrcv:
10483         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10484 #endif
10485 #ifdef TARGET_NR_msgsnd
10486     case TARGET_NR_msgsnd:
10487         return do_msgsnd(arg1, arg2, arg3, arg4);
10488 #endif
10489 #ifdef TARGET_NR_shmget
10490     case TARGET_NR_shmget:
10491         return get_errno(shmget(arg1, arg2, arg3));
10492 #endif
10493 #ifdef TARGET_NR_shmctl
10494     case TARGET_NR_shmctl:
10495         return do_shmctl(arg1, arg2, arg3);
10496 #endif
10497 #ifdef TARGET_NR_shmat
10498     case TARGET_NR_shmat:
10499         return do_shmat(cpu_env, arg1, arg2, arg3);
10500 #endif
10501 #ifdef TARGET_NR_shmdt
10502     case TARGET_NR_shmdt:
10503         return do_shmdt(arg1);
10504 #endif
10505     case TARGET_NR_fsync:
10506         return get_errno(fsync(arg1));
10507     case TARGET_NR_clone:
10508         /* Linux manages to have three different orderings for its
10509          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10510          * match the kernel's CONFIG_CLONE_* settings.
10511          * Microblaze is further special in that it uses a sixth
10512          * implicit argument to clone for the TLS pointer.
10513          */
10514 #if defined(TARGET_MICROBLAZE)
10515         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10516 #elif defined(TARGET_CLONE_BACKWARDS)
10517         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10518 #elif defined(TARGET_CLONE_BACKWARDS2)
10519         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10520 #else
10521         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10522 #endif
10523         return ret;
10524 #ifdef __NR_exit_group
10525         /* new thread calls */
10526     case TARGET_NR_exit_group:
10527         preexit_cleanup(cpu_env, arg1);
10528         return get_errno(exit_group(arg1));
10529 #endif
10530     case TARGET_NR_setdomainname:
10531         if (!(p = lock_user_string(arg1)))
10532             return -TARGET_EFAULT;
10533         ret = get_errno(setdomainname(p, arg2));
10534         unlock_user(p, arg1, 0);
10535         return ret;
10536     case TARGET_NR_uname:
10537         /* no need to transcode because we use the linux syscall */
10538         {
10539             struct new_utsname * buf;
10540 
10541             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10542                 return -TARGET_EFAULT;
10543             ret = get_errno(sys_uname(buf));
10544             if (!is_error(ret)) {
10545                 /* Overwrite the native machine name with whatever is being
10546                    emulated. */
10547                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10548                           sizeof(buf->machine));
10549                 /* Allow the user to override the reported release.  */
10550                 if (qemu_uname_release && *qemu_uname_release) {
10551                     g_strlcpy(buf->release, qemu_uname_release,
10552                               sizeof(buf->release));
10553                 }
10554             }
10555             unlock_user_struct(buf, arg1, 1);
10556         }
10557         return ret;
10558 #ifdef TARGET_I386
10559     case TARGET_NR_modify_ldt:
10560         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10561 #if !defined(TARGET_X86_64)
10562     case TARGET_NR_vm86:
10563         return do_vm86(cpu_env, arg1, arg2);
10564 #endif
10565 #endif
10566 #if defined(TARGET_NR_adjtimex)
10567     case TARGET_NR_adjtimex:
10568         {
10569             struct timex host_buf;
10570 
10571             if (target_to_host_timex(&host_buf, arg1) != 0) {
10572                 return -TARGET_EFAULT;
10573             }
10574             ret = get_errno(adjtimex(&host_buf));
10575             if (!is_error(ret)) {
10576                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10577                     return -TARGET_EFAULT;
10578                 }
10579             }
10580         }
10581         return ret;
10582 #endif
10583 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10584     case TARGET_NR_clock_adjtime:
10585         {
10586             struct timex htx, *phtx = &htx;
10587 
10588             if (target_to_host_timex(phtx, arg2) != 0) {
10589                 return -TARGET_EFAULT;
10590             }
10591             ret = get_errno(clock_adjtime(arg1, phtx));
10592             if (!is_error(ret) && phtx) {
10593                 if (host_to_target_timex(arg2, phtx) != 0) {
10594                     return -TARGET_EFAULT;
10595                 }
10596             }
10597         }
10598         return ret;
10599 #endif
10600 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10601     case TARGET_NR_clock_adjtime64:
10602         {
10603             struct timex htx;
10604 
10605             if (target_to_host_timex64(&htx, arg2) != 0) {
10606                 return -TARGET_EFAULT;
10607             }
10608             ret = get_errno(clock_adjtime(arg1, &htx));
10609             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10610                     return -TARGET_EFAULT;
10611             }
10612         }
10613         return ret;
10614 #endif
10615     case TARGET_NR_getpgid:
10616         return get_errno(getpgid(arg1));
10617     case TARGET_NR_fchdir:
10618         return get_errno(fchdir(arg1));
10619     case TARGET_NR_personality:
10620         return get_errno(personality(arg1));
10621 #ifdef TARGET_NR__llseek /* Not on alpha */
10622     case TARGET_NR__llseek:
10623         {
10624             int64_t res;
10625 #if !defined(__NR_llseek)
10626             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10627             if (res == -1) {
10628                 ret = get_errno(res);
10629             } else {
10630                 ret = 0;
10631             }
10632 #else
10633             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10634 #endif
10635             if ((ret == 0) && put_user_s64(res, arg4)) {
10636                 return -TARGET_EFAULT;
10637             }
10638         }
10639         return ret;
10640 #endif
10641 #ifdef TARGET_NR_getdents
10642     case TARGET_NR_getdents:
10643         return do_getdents(arg1, arg2, arg3);
10644 #endif /* TARGET_NR_getdents */
10645 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10646     case TARGET_NR_getdents64:
10647         return do_getdents64(arg1, arg2, arg3);
10648 #endif /* TARGET_NR_getdents64 */
10649 #if defined(TARGET_NR__newselect)
10650     case TARGET_NR__newselect:
10651         return do_select(arg1, arg2, arg3, arg4, arg5);
10652 #endif
10653 #ifdef TARGET_NR_poll
10654     case TARGET_NR_poll:
10655         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10656 #endif
10657 #ifdef TARGET_NR_ppoll
10658     case TARGET_NR_ppoll:
10659         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10660 #endif
10661 #ifdef TARGET_NR_ppoll_time64
10662     case TARGET_NR_ppoll_time64:
10663         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10664 #endif
10665     case TARGET_NR_flock:
10666         /* NOTE: the flock constant seems to be the same for every
10667            Linux platform */
10668         return get_errno(safe_flock(arg1, arg2));
10669     case TARGET_NR_readv:
10670         {
10671             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10672             if (vec != NULL) {
10673                 ret = get_errno(safe_readv(arg1, vec, arg3));
10674                 unlock_iovec(vec, arg2, arg3, 1);
10675             } else {
10676                 ret = -host_to_target_errno(errno);
10677             }
10678         }
10679         return ret;
10680     case TARGET_NR_writev:
10681         {
10682             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10683             if (vec != NULL) {
10684                 ret = get_errno(safe_writev(arg1, vec, arg3));
10685                 unlock_iovec(vec, arg2, arg3, 0);
10686             } else {
10687                 ret = -host_to_target_errno(errno);
10688             }
10689         }
10690         return ret;
10691 #if defined(TARGET_NR_preadv)
10692     case TARGET_NR_preadv:
10693         {
10694             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10695             if (vec != NULL) {
10696                 unsigned long low, high;
10697 
10698                 target_to_host_low_high(arg4, arg5, &low, &high);
10699                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10700                 unlock_iovec(vec, arg2, arg3, 1);
10701             } else {
10702                 ret = -host_to_target_errno(errno);
10703            }
10704         }
10705         return ret;
10706 #endif
10707 #if defined(TARGET_NR_pwritev)
10708     case TARGET_NR_pwritev:
10709         {
10710             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10711             if (vec != NULL) {
10712                 unsigned long low, high;
10713 
10714                 target_to_host_low_high(arg4, arg5, &low, &high);
10715                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10716                 unlock_iovec(vec, arg2, arg3, 0);
10717             } else {
10718                 ret = -host_to_target_errno(errno);
10719            }
10720         }
10721         return ret;
10722 #endif
10723     case TARGET_NR_getsid:
10724         return get_errno(getsid(arg1));
10725 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10726     case TARGET_NR_fdatasync:
10727         return get_errno(fdatasync(arg1));
10728 #endif
10729     case TARGET_NR_sched_getaffinity:
10730         {
10731             unsigned int mask_size;
10732             unsigned long *mask;
10733 
10734             /*
10735              * sched_getaffinity needs multiples of ulong, so need to take
10736              * care of mismatches between target ulong and host ulong sizes.
10737              */
10738             if (arg2 & (sizeof(abi_ulong) - 1)) {
10739                 return -TARGET_EINVAL;
10740             }
10741             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10742 
10743             mask = alloca(mask_size);
10744             memset(mask, 0, mask_size);
10745             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10746 
10747             if (!is_error(ret)) {
10748                 if (ret > arg2) {
10749                     /* More data returned than the caller's buffer will fit.
10750                      * This only happens if sizeof(abi_long) < sizeof(long)
10751                      * and the caller passed us a buffer holding an odd number
10752                      * of abi_longs. If the host kernel is actually using the
10753                      * extra 4 bytes then fail EINVAL; otherwise we can just
10754                      * ignore them and only copy the interesting part.
10755                      */
10756                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10757                     if (numcpus > arg2 * 8) {
10758                         return -TARGET_EINVAL;
10759                     }
10760                     ret = arg2;
10761                 }
10762 
10763                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10764                     return -TARGET_EFAULT;
10765                 }
10766             }
10767         }
10768         return ret;
10769     case TARGET_NR_sched_setaffinity:
10770         {
10771             unsigned int mask_size;
10772             unsigned long *mask;
10773 
10774             /*
10775              * sched_setaffinity needs multiples of ulong, so need to take
10776              * care of mismatches between target ulong and host ulong sizes.
10777              */
10778             if (arg2 & (sizeof(abi_ulong) - 1)) {
10779                 return -TARGET_EINVAL;
10780             }
10781             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10782             mask = alloca(mask_size);
10783 
10784             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10785             if (ret) {
10786                 return ret;
10787             }
10788 
10789             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10790         }
10791     case TARGET_NR_getcpu:
10792         {
10793             unsigned cpu, node;
10794             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10795                                        arg2 ? &node : NULL,
10796                                        NULL));
10797             if (is_error(ret)) {
10798                 return ret;
10799             }
10800             if (arg1 && put_user_u32(cpu, arg1)) {
10801                 return -TARGET_EFAULT;
10802             }
10803             if (arg2 && put_user_u32(node, arg2)) {
10804                 return -TARGET_EFAULT;
10805             }
10806         }
10807         return ret;
10808     case TARGET_NR_sched_setparam:
10809         {
10810             struct target_sched_param *target_schp;
10811             struct sched_param schp;
10812 
10813             if (arg2 == 0) {
10814                 return -TARGET_EINVAL;
10815             }
10816             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10817                 return -TARGET_EFAULT;
10818             }
10819             schp.sched_priority = tswap32(target_schp->sched_priority);
10820             unlock_user_struct(target_schp, arg2, 0);
10821             return get_errno(sys_sched_setparam(arg1, &schp));
10822         }
10823     case TARGET_NR_sched_getparam:
10824         {
10825             struct target_sched_param *target_schp;
10826             struct sched_param schp;
10827 
10828             if (arg2 == 0) {
10829                 return -TARGET_EINVAL;
10830             }
10831             ret = get_errno(sys_sched_getparam(arg1, &schp));
10832             if (!is_error(ret)) {
10833                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10834                     return -TARGET_EFAULT;
10835                 }
10836                 target_schp->sched_priority = tswap32(schp.sched_priority);
10837                 unlock_user_struct(target_schp, arg2, 1);
10838             }
10839         }
10840         return ret;
10841     case TARGET_NR_sched_setscheduler:
10842         {
10843             struct target_sched_param *target_schp;
10844             struct sched_param schp;
10845             if (arg3 == 0) {
10846                 return -TARGET_EINVAL;
10847             }
10848             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10849                 return -TARGET_EFAULT;
10850             }
10851             schp.sched_priority = tswap32(target_schp->sched_priority);
10852             unlock_user_struct(target_schp, arg3, 0);
10853             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10854         }
10855     case TARGET_NR_sched_getscheduler:
10856         return get_errno(sys_sched_getscheduler(arg1));
10857     case TARGET_NR_sched_getattr:
10858         {
10859             struct target_sched_attr *target_scha;
10860             struct sched_attr scha;
10861             if (arg2 == 0) {
10862                 return -TARGET_EINVAL;
10863             }
10864             if (arg3 > sizeof(scha)) {
10865                 arg3 = sizeof(scha);
10866             }
10867             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10868             if (!is_error(ret)) {
10869                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10870                 if (!target_scha) {
10871                     return -TARGET_EFAULT;
10872                 }
10873                 target_scha->size = tswap32(scha.size);
10874                 target_scha->sched_policy = tswap32(scha.sched_policy);
10875                 target_scha->sched_flags = tswap64(scha.sched_flags);
10876                 target_scha->sched_nice = tswap32(scha.sched_nice);
10877                 target_scha->sched_priority = tswap32(scha.sched_priority);
10878                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10879                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10880                 target_scha->sched_period = tswap64(scha.sched_period);
10881                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10882                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10883                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10884                 }
10885                 unlock_user(target_scha, arg2, arg3);
10886             }
10887             return ret;
10888         }
10889     case TARGET_NR_sched_setattr:
10890         {
10891             struct target_sched_attr *target_scha;
10892             struct sched_attr scha;
10893             uint32_t size;
10894             int zeroed;
10895             if (arg2 == 0) {
10896                 return -TARGET_EINVAL;
10897             }
10898             if (get_user_u32(size, arg2)) {
10899                 return -TARGET_EFAULT;
10900             }
10901             if (!size) {
10902                 size = offsetof(struct target_sched_attr, sched_util_min);
10903             }
10904             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10905                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10906                     return -TARGET_EFAULT;
10907                 }
10908                 return -TARGET_E2BIG;
10909             }
10910 
10911             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10912             if (zeroed < 0) {
10913                 return zeroed;
10914             } else if (zeroed == 0) {
10915                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10916                     return -TARGET_EFAULT;
10917                 }
10918                 return -TARGET_E2BIG;
10919             }
10920             if (size > sizeof(struct target_sched_attr)) {
10921                 size = sizeof(struct target_sched_attr);
10922             }
10923 
10924             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10925             if (!target_scha) {
10926                 return -TARGET_EFAULT;
10927             }
10928             scha.size = size;
10929             scha.sched_policy = tswap32(target_scha->sched_policy);
10930             scha.sched_flags = tswap64(target_scha->sched_flags);
10931             scha.sched_nice = tswap32(target_scha->sched_nice);
10932             scha.sched_priority = tswap32(target_scha->sched_priority);
10933             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10934             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10935             scha.sched_period = tswap64(target_scha->sched_period);
10936             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10937                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10938                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10939             }
10940             unlock_user(target_scha, arg2, 0);
10941             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10942         }
10943     case TARGET_NR_sched_yield:
10944         return get_errno(sched_yield());
10945     case TARGET_NR_sched_get_priority_max:
10946         return get_errno(sched_get_priority_max(arg1));
10947     case TARGET_NR_sched_get_priority_min:
10948         return get_errno(sched_get_priority_min(arg1));
10949 #ifdef TARGET_NR_sched_rr_get_interval
10950     case TARGET_NR_sched_rr_get_interval:
10951         {
10952             struct timespec ts;
10953             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10954             if (!is_error(ret)) {
10955                 ret = host_to_target_timespec(arg2, &ts);
10956             }
10957         }
10958         return ret;
10959 #endif
10960 #ifdef TARGET_NR_sched_rr_get_interval_time64
10961     case TARGET_NR_sched_rr_get_interval_time64:
10962         {
10963             struct timespec ts;
10964             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10965             if (!is_error(ret)) {
10966                 ret = host_to_target_timespec64(arg2, &ts);
10967             }
10968         }
10969         return ret;
10970 #endif
10971 #if defined(TARGET_NR_nanosleep)
10972     case TARGET_NR_nanosleep:
10973         {
10974             struct timespec req, rem;
10975             target_to_host_timespec(&req, arg1);
10976             ret = get_errno(safe_nanosleep(&req, &rem));
10977             if (is_error(ret) && arg2) {
10978                 host_to_target_timespec(arg2, &rem);
10979             }
10980         }
10981         return ret;
10982 #endif
10983     case TARGET_NR_prctl:
10984         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10985         break;
10986 #ifdef TARGET_NR_arch_prctl
10987     case TARGET_NR_arch_prctl:
10988         return do_arch_prctl(cpu_env, arg1, arg2);
10989 #endif
10990 #ifdef TARGET_NR_pread64
10991     case TARGET_NR_pread64:
10992         if (regpairs_aligned(cpu_env, num)) {
10993             arg4 = arg5;
10994             arg5 = arg6;
10995         }
10996         if (arg2 == 0 && arg3 == 0) {
10997             /* Special-case NULL buffer and zero length, which should succeed */
10998             p = 0;
10999         } else {
11000             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11001             if (!p) {
11002                 return -TARGET_EFAULT;
11003             }
11004         }
11005         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11006         unlock_user(p, arg2, ret);
11007         return ret;
11008     case TARGET_NR_pwrite64:
11009         if (regpairs_aligned(cpu_env, num)) {
11010             arg4 = arg5;
11011             arg5 = arg6;
11012         }
11013         if (arg2 == 0 && arg3 == 0) {
11014             /* Special-case NULL buffer and zero length, which should succeed */
11015             p = 0;
11016         } else {
11017             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11018             if (!p) {
11019                 return -TARGET_EFAULT;
11020             }
11021         }
11022         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11023         unlock_user(p, arg2, 0);
11024         return ret;
11025 #endif
11026     case TARGET_NR_getcwd:
11027         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11028             return -TARGET_EFAULT;
11029         ret = get_errno(sys_getcwd1(p, arg2));
11030         unlock_user(p, arg1, ret);
11031         return ret;
11032     case TARGET_NR_capget:
11033     case TARGET_NR_capset:
11034     {
11035         struct target_user_cap_header *target_header;
11036         struct target_user_cap_data *target_data = NULL;
11037         struct __user_cap_header_struct header;
11038         struct __user_cap_data_struct data[2];
11039         struct __user_cap_data_struct *dataptr = NULL;
11040         int i, target_datalen;
11041         int data_items = 1;
11042 
11043         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11044             return -TARGET_EFAULT;
11045         }
11046         header.version = tswap32(target_header->version);
11047         header.pid = tswap32(target_header->pid);
11048 
11049         if (header.version != _LINUX_CAPABILITY_VERSION) {
11050             /* Version 2 and up takes pointer to two user_data structs */
11051             data_items = 2;
11052         }
11053 
11054         target_datalen = sizeof(*target_data) * data_items;
11055 
11056         if (arg2) {
11057             if (num == TARGET_NR_capget) {
11058                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11059             } else {
11060                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11061             }
11062             if (!target_data) {
11063                 unlock_user_struct(target_header, arg1, 0);
11064                 return -TARGET_EFAULT;
11065             }
11066 
11067             if (num == TARGET_NR_capset) {
11068                 for (i = 0; i < data_items; i++) {
11069                     data[i].effective = tswap32(target_data[i].effective);
11070                     data[i].permitted = tswap32(target_data[i].permitted);
11071                     data[i].inheritable = tswap32(target_data[i].inheritable);
11072                 }
11073             }
11074 
11075             dataptr = data;
11076         }
11077 
11078         if (num == TARGET_NR_capget) {
11079             ret = get_errno(capget(&header, dataptr));
11080         } else {
11081             ret = get_errno(capset(&header, dataptr));
11082         }
11083 
11084         /* The kernel always updates version for both capget and capset */
11085         target_header->version = tswap32(header.version);
11086         unlock_user_struct(target_header, arg1, 1);
11087 
11088         if (arg2) {
11089             if (num == TARGET_NR_capget) {
11090                 for (i = 0; i < data_items; i++) {
11091                     target_data[i].effective = tswap32(data[i].effective);
11092                     target_data[i].permitted = tswap32(data[i].permitted);
11093                     target_data[i].inheritable = tswap32(data[i].inheritable);
11094                 }
11095                 unlock_user(target_data, arg2, target_datalen);
11096             } else {
11097                 unlock_user(target_data, arg2, 0);
11098             }
11099         }
11100         return ret;
11101     }
11102     case TARGET_NR_sigaltstack:
11103         return do_sigaltstack(arg1, arg2, cpu_env);
11104 
11105 #ifdef CONFIG_SENDFILE
11106 #ifdef TARGET_NR_sendfile
11107     case TARGET_NR_sendfile:
11108     {
11109         off_t *offp = NULL;
11110         off_t off;
11111         if (arg3) {
11112             ret = get_user_sal(off, arg3);
11113             if (is_error(ret)) {
11114                 return ret;
11115             }
11116             offp = &off;
11117         }
11118         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11119         if (!is_error(ret) && arg3) {
11120             abi_long ret2 = put_user_sal(off, arg3);
11121             if (is_error(ret2)) {
11122                 ret = ret2;
11123             }
11124         }
11125         return ret;
11126     }
11127 #endif
11128 #ifdef TARGET_NR_sendfile64
11129     case TARGET_NR_sendfile64:
11130     {
11131         off_t *offp = NULL;
11132         off_t off;
11133         if (arg3) {
11134             ret = get_user_s64(off, arg3);
11135             if (is_error(ret)) {
11136                 return ret;
11137             }
11138             offp = &off;
11139         }
11140         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11141         if (!is_error(ret) && arg3) {
11142             abi_long ret2 = put_user_s64(off, arg3);
11143             if (is_error(ret2)) {
11144                 ret = ret2;
11145             }
11146         }
11147         return ret;
11148     }
11149 #endif
11150 #endif
11151 #ifdef TARGET_NR_vfork
11152     case TARGET_NR_vfork:
11153         return get_errno(do_fork(cpu_env,
11154                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11155                          0, 0, 0, 0));
11156 #endif
11157 #ifdef TARGET_NR_ugetrlimit
11158     case TARGET_NR_ugetrlimit:
11159     {
11160 	struct rlimit rlim;
11161 	int resource = target_to_host_resource(arg1);
11162 	ret = get_errno(getrlimit(resource, &rlim));
11163 	if (!is_error(ret)) {
11164 	    struct target_rlimit *target_rlim;
11165             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11166                 return -TARGET_EFAULT;
11167 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11168 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11169             unlock_user_struct(target_rlim, arg2, 1);
11170 	}
11171         return ret;
11172     }
11173 #endif
11174 #ifdef TARGET_NR_truncate64
11175     case TARGET_NR_truncate64:
11176         if (!(p = lock_user_string(arg1)))
11177             return -TARGET_EFAULT;
11178 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11179         unlock_user(p, arg1, 0);
11180         return ret;
11181 #endif
11182 #ifdef TARGET_NR_ftruncate64
11183     case TARGET_NR_ftruncate64:
11184         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11185 #endif
11186 #ifdef TARGET_NR_stat64
11187     case TARGET_NR_stat64:
11188         if (!(p = lock_user_string(arg1))) {
11189             return -TARGET_EFAULT;
11190         }
11191         ret = get_errno(stat(path(p), &st));
11192         unlock_user(p, arg1, 0);
11193         if (!is_error(ret))
11194             ret = host_to_target_stat64(cpu_env, arg2, &st);
11195         return ret;
11196 #endif
11197 #ifdef TARGET_NR_lstat64
11198     case TARGET_NR_lstat64:
11199         if (!(p = lock_user_string(arg1))) {
11200             return -TARGET_EFAULT;
11201         }
11202         ret = get_errno(lstat(path(p), &st));
11203         unlock_user(p, arg1, 0);
11204         if (!is_error(ret))
11205             ret = host_to_target_stat64(cpu_env, arg2, &st);
11206         return ret;
11207 #endif
11208 #ifdef TARGET_NR_fstat64
11209     case TARGET_NR_fstat64:
11210         ret = get_errno(fstat(arg1, &st));
11211         if (!is_error(ret))
11212             ret = host_to_target_stat64(cpu_env, arg2, &st);
11213         return ret;
11214 #endif
11215 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11216 #ifdef TARGET_NR_fstatat64
11217     case TARGET_NR_fstatat64:
11218 #endif
11219 #ifdef TARGET_NR_newfstatat
11220     case TARGET_NR_newfstatat:
11221 #endif
11222         if (!(p = lock_user_string(arg2))) {
11223             return -TARGET_EFAULT;
11224         }
11225         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11226         unlock_user(p, arg2, 0);
11227         if (!is_error(ret))
11228             ret = host_to_target_stat64(cpu_env, arg3, &st);
11229         return ret;
11230 #endif
11231 #if defined(TARGET_NR_statx)
11232     case TARGET_NR_statx:
11233         {
11234             struct target_statx *target_stx;
11235             int dirfd = arg1;
11236             int flags = arg3;
11237 
11238             p = lock_user_string(arg2);
11239             if (p == NULL) {
11240                 return -TARGET_EFAULT;
11241             }
11242 #if defined(__NR_statx)
11243             {
11244                 /*
11245                  * It is assumed that struct statx is architecture independent.
11246                  */
11247                 struct target_statx host_stx;
11248                 int mask = arg4;
11249 
11250                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11251                 if (!is_error(ret)) {
11252                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11253                         unlock_user(p, arg2, 0);
11254                         return -TARGET_EFAULT;
11255                     }
11256                 }
11257 
11258                 if (ret != -TARGET_ENOSYS) {
11259                     unlock_user(p, arg2, 0);
11260                     return ret;
11261                 }
11262             }
11263 #endif
11264             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11265             unlock_user(p, arg2, 0);
11266 
11267             if (!is_error(ret)) {
11268                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11269                     return -TARGET_EFAULT;
11270                 }
11271                 memset(target_stx, 0, sizeof(*target_stx));
11272                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11273                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11274                 __put_user(st.st_ino, &target_stx->stx_ino);
11275                 __put_user(st.st_mode, &target_stx->stx_mode);
11276                 __put_user(st.st_uid, &target_stx->stx_uid);
11277                 __put_user(st.st_gid, &target_stx->stx_gid);
11278                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11279                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11280                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11281                 __put_user(st.st_size, &target_stx->stx_size);
11282                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11283                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11284                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11285                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11286                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11287                 unlock_user_struct(target_stx, arg5, 1);
11288             }
11289         }
11290         return ret;
11291 #endif
11292 #ifdef TARGET_NR_lchown
11293     case TARGET_NR_lchown:
11294         if (!(p = lock_user_string(arg1)))
11295             return -TARGET_EFAULT;
11296         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11297         unlock_user(p, arg1, 0);
11298         return ret;
11299 #endif
11300 #ifdef TARGET_NR_getuid
11301     case TARGET_NR_getuid:
11302         return get_errno(high2lowuid(getuid()));
11303 #endif
11304 #ifdef TARGET_NR_getgid
11305     case TARGET_NR_getgid:
11306         return get_errno(high2lowgid(getgid()));
11307 #endif
11308 #ifdef TARGET_NR_geteuid
11309     case TARGET_NR_geteuid:
11310         return get_errno(high2lowuid(geteuid()));
11311 #endif
11312 #ifdef TARGET_NR_getegid
11313     case TARGET_NR_getegid:
11314         return get_errno(high2lowgid(getegid()));
11315 #endif
11316     case TARGET_NR_setreuid:
11317         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11318     case TARGET_NR_setregid:
11319         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11320     case TARGET_NR_getgroups:
11321         {
11322             int gidsetsize = arg1;
11323             target_id *target_grouplist;
11324             gid_t *grouplist;
11325             int i;
11326 
11327             grouplist = alloca(gidsetsize * sizeof(gid_t));
11328             ret = get_errno(getgroups(gidsetsize, grouplist));
11329             if (gidsetsize == 0)
11330                 return ret;
11331             if (!is_error(ret)) {
11332                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11333                 if (!target_grouplist)
11334                     return -TARGET_EFAULT;
11335                 for(i = 0;i < ret; i++)
11336                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11337                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11338             }
11339         }
11340         return ret;
11341     case TARGET_NR_setgroups:
11342         {
11343             int gidsetsize = arg1;
11344             target_id *target_grouplist;
11345             gid_t *grouplist = NULL;
11346             int i;
11347             if (gidsetsize) {
11348                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11349                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11350                 if (!target_grouplist) {
11351                     return -TARGET_EFAULT;
11352                 }
11353                 for (i = 0; i < gidsetsize; i++) {
11354                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11355                 }
11356                 unlock_user(target_grouplist, arg2, 0);
11357             }
11358             return get_errno(setgroups(gidsetsize, grouplist));
11359         }
11360     case TARGET_NR_fchown:
11361         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11362 #if defined(TARGET_NR_fchownat)
11363     case TARGET_NR_fchownat:
11364         if (!(p = lock_user_string(arg2)))
11365             return -TARGET_EFAULT;
11366         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11367                                  low2highgid(arg4), arg5));
11368         unlock_user(p, arg2, 0);
11369         return ret;
11370 #endif
11371 #ifdef TARGET_NR_setresuid
11372     case TARGET_NR_setresuid:
11373         return get_errno(sys_setresuid(low2highuid(arg1),
11374                                        low2highuid(arg2),
11375                                        low2highuid(arg3)));
11376 #endif
11377 #ifdef TARGET_NR_getresuid
11378     case TARGET_NR_getresuid:
11379         {
11380             uid_t ruid, euid, suid;
11381             ret = get_errno(getresuid(&ruid, &euid, &suid));
11382             if (!is_error(ret)) {
11383                 if (put_user_id(high2lowuid(ruid), arg1)
11384                     || put_user_id(high2lowuid(euid), arg2)
11385                     || put_user_id(high2lowuid(suid), arg3))
11386                     return -TARGET_EFAULT;
11387             }
11388         }
11389         return ret;
11390 #endif
11391 #ifdef TARGET_NR_getresgid
11392     case TARGET_NR_setresgid:
11393         return get_errno(sys_setresgid(low2highgid(arg1),
11394                                        low2highgid(arg2),
11395                                        low2highgid(arg3)));
11396 #endif
11397 #ifdef TARGET_NR_getresgid
11398     case TARGET_NR_getresgid:
11399         {
11400             gid_t rgid, egid, sgid;
11401             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11402             if (!is_error(ret)) {
11403                 if (put_user_id(high2lowgid(rgid), arg1)
11404                     || put_user_id(high2lowgid(egid), arg2)
11405                     || put_user_id(high2lowgid(sgid), arg3))
11406                     return -TARGET_EFAULT;
11407             }
11408         }
11409         return ret;
11410 #endif
11411 #ifdef TARGET_NR_chown
11412     case TARGET_NR_chown:
11413         if (!(p = lock_user_string(arg1)))
11414             return -TARGET_EFAULT;
11415         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11416         unlock_user(p, arg1, 0);
11417         return ret;
11418 #endif
11419     case TARGET_NR_setuid:
11420         return get_errno(sys_setuid(low2highuid(arg1)));
11421     case TARGET_NR_setgid:
11422         return get_errno(sys_setgid(low2highgid(arg1)));
11423     case TARGET_NR_setfsuid:
11424         return get_errno(setfsuid(arg1));
11425     case TARGET_NR_setfsgid:
11426         return get_errno(setfsgid(arg1));
11427 
11428 #ifdef TARGET_NR_lchown32
11429     case TARGET_NR_lchown32:
11430         if (!(p = lock_user_string(arg1)))
11431             return -TARGET_EFAULT;
11432         ret = get_errno(lchown(p, arg2, arg3));
11433         unlock_user(p, arg1, 0);
11434         return ret;
11435 #endif
11436 #ifdef TARGET_NR_getuid32
11437     case TARGET_NR_getuid32:
11438         return get_errno(getuid());
11439 #endif
11440 
11441 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11442    /* Alpha specific */
11443     case TARGET_NR_getxuid:
11444          {
11445             uid_t euid;
11446             euid=geteuid();
11447             cpu_env->ir[IR_A4]=euid;
11448          }
11449         return get_errno(getuid());
11450 #endif
11451 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11452    /* Alpha specific */
11453     case TARGET_NR_getxgid:
11454          {
11455             uid_t egid;
11456             egid=getegid();
11457             cpu_env->ir[IR_A4]=egid;
11458          }
11459         return get_errno(getgid());
11460 #endif
11461 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11462     /* Alpha specific */
11463     case TARGET_NR_osf_getsysinfo:
11464         ret = -TARGET_EOPNOTSUPP;
11465         switch (arg1) {
11466           case TARGET_GSI_IEEE_FP_CONTROL:
11467             {
11468                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11469                 uint64_t swcr = cpu_env->swcr;
11470 
11471                 swcr &= ~SWCR_STATUS_MASK;
11472                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11473 
11474                 if (put_user_u64 (swcr, arg2))
11475                         return -TARGET_EFAULT;
11476                 ret = 0;
11477             }
11478             break;
11479 
11480           /* case GSI_IEEE_STATE_AT_SIGNAL:
11481              -- Not implemented in linux kernel.
11482              case GSI_UACPROC:
11483              -- Retrieves current unaligned access state; not much used.
11484              case GSI_PROC_TYPE:
11485              -- Retrieves implver information; surely not used.
11486              case GSI_GET_HWRPB:
11487              -- Grabs a copy of the HWRPB; surely not used.
11488           */
11489         }
11490         return ret;
11491 #endif
11492 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11493     /* Alpha specific */
11494     case TARGET_NR_osf_setsysinfo:
11495         ret = -TARGET_EOPNOTSUPP;
11496         switch (arg1) {
11497           case TARGET_SSI_IEEE_FP_CONTROL:
11498             {
11499                 uint64_t swcr, fpcr;
11500 
11501                 if (get_user_u64 (swcr, arg2)) {
11502                     return -TARGET_EFAULT;
11503                 }
11504 
11505                 /*
11506                  * The kernel calls swcr_update_status to update the
11507                  * status bits from the fpcr at every point that it
11508                  * could be queried.  Therefore, we store the status
11509                  * bits only in FPCR.
11510                  */
11511                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11512 
11513                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11514                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11515                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11516                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11517                 ret = 0;
11518             }
11519             break;
11520 
11521           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11522             {
11523                 uint64_t exc, fpcr, fex;
11524 
11525                 if (get_user_u64(exc, arg2)) {
11526                     return -TARGET_EFAULT;
11527                 }
11528                 exc &= SWCR_STATUS_MASK;
11529                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11530 
11531                 /* Old exceptions are not signaled.  */
11532                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11533                 fex = exc & ~fex;
11534                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11535                 fex &= (cpu_env)->swcr;
11536 
11537                 /* Update the hardware fpcr.  */
11538                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11539                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11540 
11541                 if (fex) {
11542                     int si_code = TARGET_FPE_FLTUNK;
11543                     target_siginfo_t info;
11544 
11545                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11546                         si_code = TARGET_FPE_FLTUND;
11547                     }
11548                     if (fex & SWCR_TRAP_ENABLE_INE) {
11549                         si_code = TARGET_FPE_FLTRES;
11550                     }
11551                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11552                         si_code = TARGET_FPE_FLTUND;
11553                     }
11554                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11555                         si_code = TARGET_FPE_FLTOVF;
11556                     }
11557                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11558                         si_code = TARGET_FPE_FLTDIV;
11559                     }
11560                     if (fex & SWCR_TRAP_ENABLE_INV) {
11561                         si_code = TARGET_FPE_FLTINV;
11562                     }
11563 
11564                     info.si_signo = SIGFPE;
11565                     info.si_errno = 0;
11566                     info.si_code = si_code;
11567                     info._sifields._sigfault._addr = (cpu_env)->pc;
11568                     queue_signal(cpu_env, info.si_signo,
11569                                  QEMU_SI_FAULT, &info);
11570                 }
11571                 ret = 0;
11572             }
11573             break;
11574 
11575           /* case SSI_NVPAIRS:
11576              -- Used with SSIN_UACPROC to enable unaligned accesses.
11577              case SSI_IEEE_STATE_AT_SIGNAL:
11578              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11579              -- Not implemented in linux kernel
11580           */
11581         }
11582         return ret;
11583 #endif
11584 #ifdef TARGET_NR_osf_sigprocmask
11585     /* Alpha specific.  */
11586     case TARGET_NR_osf_sigprocmask:
11587         {
11588             abi_ulong mask;
11589             int how;
11590             sigset_t set, oldset;
11591 
11592             switch(arg1) {
11593             case TARGET_SIG_BLOCK:
11594                 how = SIG_BLOCK;
11595                 break;
11596             case TARGET_SIG_UNBLOCK:
11597                 how = SIG_UNBLOCK;
11598                 break;
11599             case TARGET_SIG_SETMASK:
11600                 how = SIG_SETMASK;
11601                 break;
11602             default:
11603                 return -TARGET_EINVAL;
11604             }
11605             mask = arg2;
11606             target_to_host_old_sigset(&set, &mask);
11607             ret = do_sigprocmask(how, &set, &oldset);
11608             if (!ret) {
11609                 host_to_target_old_sigset(&mask, &oldset);
11610                 ret = mask;
11611             }
11612         }
11613         return ret;
11614 #endif
11615 
11616 #ifdef TARGET_NR_getgid32
11617     case TARGET_NR_getgid32:
11618         return get_errno(getgid());
11619 #endif
11620 #ifdef TARGET_NR_geteuid32
11621     case TARGET_NR_geteuid32:
11622         return get_errno(geteuid());
11623 #endif
11624 #ifdef TARGET_NR_getegid32
11625     case TARGET_NR_getegid32:
11626         return get_errno(getegid());
11627 #endif
11628 #ifdef TARGET_NR_setreuid32
11629     case TARGET_NR_setreuid32:
11630         return get_errno(setreuid(arg1, arg2));
11631 #endif
11632 #ifdef TARGET_NR_setregid32
11633     case TARGET_NR_setregid32:
11634         return get_errno(setregid(arg1, arg2));
11635 #endif
11636 #ifdef TARGET_NR_getgroups32
11637     case TARGET_NR_getgroups32:
11638         {
11639             int gidsetsize = arg1;
11640             uint32_t *target_grouplist;
11641             gid_t *grouplist;
11642             int i;
11643 
11644             grouplist = alloca(gidsetsize * sizeof(gid_t));
11645             ret = get_errno(getgroups(gidsetsize, grouplist));
11646             if (gidsetsize == 0)
11647                 return ret;
11648             if (!is_error(ret)) {
11649                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11650                 if (!target_grouplist) {
11651                     return -TARGET_EFAULT;
11652                 }
11653                 for(i = 0;i < ret; i++)
11654                     target_grouplist[i] = tswap32(grouplist[i]);
11655                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11656             }
11657         }
11658         return ret;
11659 #endif
11660 #ifdef TARGET_NR_setgroups32
11661     case TARGET_NR_setgroups32:
11662         {
11663             int gidsetsize = arg1;
11664             uint32_t *target_grouplist;
11665             gid_t *grouplist;
11666             int i;
11667 
11668             grouplist = alloca(gidsetsize * sizeof(gid_t));
11669             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11670             if (!target_grouplist) {
11671                 return -TARGET_EFAULT;
11672             }
11673             for(i = 0;i < gidsetsize; i++)
11674                 grouplist[i] = tswap32(target_grouplist[i]);
11675             unlock_user(target_grouplist, arg2, 0);
11676             return get_errno(setgroups(gidsetsize, grouplist));
11677         }
11678 #endif
11679 #ifdef TARGET_NR_fchown32
11680     case TARGET_NR_fchown32:
11681         return get_errno(fchown(arg1, arg2, arg3));
11682 #endif
11683 #ifdef TARGET_NR_setresuid32
11684     case TARGET_NR_setresuid32:
11685         return get_errno(sys_setresuid(arg1, arg2, arg3));
11686 #endif
11687 #ifdef TARGET_NR_getresuid32
11688     case TARGET_NR_getresuid32:
11689         {
11690             uid_t ruid, euid, suid;
11691             ret = get_errno(getresuid(&ruid, &euid, &suid));
11692             if (!is_error(ret)) {
11693                 if (put_user_u32(ruid, arg1)
11694                     || put_user_u32(euid, arg2)
11695                     || put_user_u32(suid, arg3))
11696                     return -TARGET_EFAULT;
11697             }
11698         }
11699         return ret;
11700 #endif
11701 #ifdef TARGET_NR_setresgid32
11702     case TARGET_NR_setresgid32:
11703         return get_errno(sys_setresgid(arg1, arg2, arg3));
11704 #endif
11705 #ifdef TARGET_NR_getresgid32
11706     case TARGET_NR_getresgid32:
11707         {
11708             gid_t rgid, egid, sgid;
11709             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11710             if (!is_error(ret)) {
11711                 if (put_user_u32(rgid, arg1)
11712                     || put_user_u32(egid, arg2)
11713                     || put_user_u32(sgid, arg3))
11714                     return -TARGET_EFAULT;
11715             }
11716         }
11717         return ret;
11718 #endif
11719 #ifdef TARGET_NR_chown32
11720     case TARGET_NR_chown32:
11721         if (!(p = lock_user_string(arg1)))
11722             return -TARGET_EFAULT;
11723         ret = get_errno(chown(p, arg2, arg3));
11724         unlock_user(p, arg1, 0);
11725         return ret;
11726 #endif
11727 #ifdef TARGET_NR_setuid32
11728     case TARGET_NR_setuid32:
11729         return get_errno(sys_setuid(arg1));
11730 #endif
11731 #ifdef TARGET_NR_setgid32
11732     case TARGET_NR_setgid32:
11733         return get_errno(sys_setgid(arg1));
11734 #endif
11735 #ifdef TARGET_NR_setfsuid32
11736     case TARGET_NR_setfsuid32:
11737         return get_errno(setfsuid(arg1));
11738 #endif
11739 #ifdef TARGET_NR_setfsgid32
11740     case TARGET_NR_setfsgid32:
11741         return get_errno(setfsgid(arg1));
11742 #endif
11743 #ifdef TARGET_NR_mincore
11744     case TARGET_NR_mincore:
11745         {
11746             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11747             if (!a) {
11748                 return -TARGET_ENOMEM;
11749             }
11750             p = lock_user_string(arg3);
11751             if (!p) {
11752                 ret = -TARGET_EFAULT;
11753             } else {
11754                 ret = get_errno(mincore(a, arg2, p));
11755                 unlock_user(p, arg3, ret);
11756             }
11757             unlock_user(a, arg1, 0);
11758         }
11759         return ret;
11760 #endif
11761 #ifdef TARGET_NR_arm_fadvise64_64
11762     case TARGET_NR_arm_fadvise64_64:
11763         /* arm_fadvise64_64 looks like fadvise64_64 but
11764          * with different argument order: fd, advice, offset, len
11765          * rather than the usual fd, offset, len, advice.
11766          * Note that offset and len are both 64-bit so appear as
11767          * pairs of 32-bit registers.
11768          */
11769         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11770                             target_offset64(arg5, arg6), arg2);
11771         return -host_to_target_errno(ret);
11772 #endif
11773 
11774 #if TARGET_ABI_BITS == 32
11775 
11776 #ifdef TARGET_NR_fadvise64_64
11777     case TARGET_NR_fadvise64_64:
11778 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11779         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11780         ret = arg2;
11781         arg2 = arg3;
11782         arg3 = arg4;
11783         arg4 = arg5;
11784         arg5 = arg6;
11785         arg6 = ret;
11786 #else
11787         /* 6 args: fd, offset (high, low), len (high, low), advice */
11788         if (regpairs_aligned(cpu_env, num)) {
11789             /* offset is in (3,4), len in (5,6) and advice in 7 */
11790             arg2 = arg3;
11791             arg3 = arg4;
11792             arg4 = arg5;
11793             arg5 = arg6;
11794             arg6 = arg7;
11795         }
11796 #endif
11797         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11798                             target_offset64(arg4, arg5), arg6);
11799         return -host_to_target_errno(ret);
11800 #endif
11801 
11802 #ifdef TARGET_NR_fadvise64
11803     case TARGET_NR_fadvise64:
11804         /* 5 args: fd, offset (high, low), len, advice */
11805         if (regpairs_aligned(cpu_env, num)) {
11806             /* offset is in (3,4), len in 5 and advice in 6 */
11807             arg2 = arg3;
11808             arg3 = arg4;
11809             arg4 = arg5;
11810             arg5 = arg6;
11811         }
11812         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11813         return -host_to_target_errno(ret);
11814 #endif
11815 
11816 #else /* not a 32-bit ABI */
11817 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11818 #ifdef TARGET_NR_fadvise64_64
11819     case TARGET_NR_fadvise64_64:
11820 #endif
11821 #ifdef TARGET_NR_fadvise64
11822     case TARGET_NR_fadvise64:
11823 #endif
11824 #ifdef TARGET_S390X
11825         switch (arg4) {
11826         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11827         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11828         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11829         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11830         default: break;
11831         }
11832 #endif
11833         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11834 #endif
11835 #endif /* end of 64-bit ABI fadvise handling */
11836 
11837 #ifdef TARGET_NR_madvise
11838     case TARGET_NR_madvise:
11839         return target_madvise(arg1, arg2, arg3);
11840 #endif
11841 #ifdef TARGET_NR_fcntl64
11842     case TARGET_NR_fcntl64:
11843     {
11844         int cmd;
11845         struct flock64 fl;
11846         from_flock64_fn *copyfrom = copy_from_user_flock64;
11847         to_flock64_fn *copyto = copy_to_user_flock64;
11848 
11849 #ifdef TARGET_ARM
11850         if (!cpu_env->eabi) {
11851             copyfrom = copy_from_user_oabi_flock64;
11852             copyto = copy_to_user_oabi_flock64;
11853         }
11854 #endif
11855 
11856         cmd = target_to_host_fcntl_cmd(arg2);
11857         if (cmd == -TARGET_EINVAL) {
11858             return cmd;
11859         }
11860 
11861         switch(arg2) {
11862         case TARGET_F_GETLK64:
11863             ret = copyfrom(&fl, arg3);
11864             if (ret) {
11865                 break;
11866             }
11867             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11868             if (ret == 0) {
11869                 ret = copyto(arg3, &fl);
11870             }
11871 	    break;
11872 
11873         case TARGET_F_SETLK64:
11874         case TARGET_F_SETLKW64:
11875             ret = copyfrom(&fl, arg3);
11876             if (ret) {
11877                 break;
11878             }
11879             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11880 	    break;
11881         default:
11882             ret = do_fcntl(arg1, arg2, arg3);
11883             break;
11884         }
11885         return ret;
11886     }
11887 #endif
11888 #ifdef TARGET_NR_cacheflush
11889     case TARGET_NR_cacheflush:
11890         /* self-modifying code is handled automatically, so nothing needed */
11891         return 0;
11892 #endif
11893 #ifdef TARGET_NR_getpagesize
11894     case TARGET_NR_getpagesize:
11895         return TARGET_PAGE_SIZE;
11896 #endif
11897     case TARGET_NR_gettid:
11898         return get_errno(sys_gettid());
11899 #ifdef TARGET_NR_readahead
11900     case TARGET_NR_readahead:
11901 #if TARGET_ABI_BITS == 32
11902         if (regpairs_aligned(cpu_env, num)) {
11903             arg2 = arg3;
11904             arg3 = arg4;
11905             arg4 = arg5;
11906         }
11907         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11908 #else
11909         ret = get_errno(readahead(arg1, arg2, arg3));
11910 #endif
11911         return ret;
11912 #endif
11913 #ifdef CONFIG_ATTR
11914 #ifdef TARGET_NR_setxattr
11915     case TARGET_NR_listxattr:
11916     case TARGET_NR_llistxattr:
11917     {
11918         void *p, *b = 0;
11919         if (arg2) {
11920             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11921             if (!b) {
11922                 return -TARGET_EFAULT;
11923             }
11924         }
11925         p = lock_user_string(arg1);
11926         if (p) {
11927             if (num == TARGET_NR_listxattr) {
11928                 ret = get_errno(listxattr(p, b, arg3));
11929             } else {
11930                 ret = get_errno(llistxattr(p, b, arg3));
11931             }
11932         } else {
11933             ret = -TARGET_EFAULT;
11934         }
11935         unlock_user(p, arg1, 0);
11936         unlock_user(b, arg2, arg3);
11937         return ret;
11938     }
11939     case TARGET_NR_flistxattr:
11940     {
11941         void *b = 0;
11942         if (arg2) {
11943             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11944             if (!b) {
11945                 return -TARGET_EFAULT;
11946             }
11947         }
11948         ret = get_errno(flistxattr(arg1, b, arg3));
11949         unlock_user(b, arg2, arg3);
11950         return ret;
11951     }
11952     case TARGET_NR_setxattr:
11953     case TARGET_NR_lsetxattr:
11954         {
11955             void *p, *n, *v = 0;
11956             if (arg3) {
11957                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11958                 if (!v) {
11959                     return -TARGET_EFAULT;
11960                 }
11961             }
11962             p = lock_user_string(arg1);
11963             n = lock_user_string(arg2);
11964             if (p && n) {
11965                 if (num == TARGET_NR_setxattr) {
11966                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11967                 } else {
11968                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11969                 }
11970             } else {
11971                 ret = -TARGET_EFAULT;
11972             }
11973             unlock_user(p, arg1, 0);
11974             unlock_user(n, arg2, 0);
11975             unlock_user(v, arg3, 0);
11976         }
11977         return ret;
11978     case TARGET_NR_fsetxattr:
11979         {
11980             void *n, *v = 0;
11981             if (arg3) {
11982                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11983                 if (!v) {
11984                     return -TARGET_EFAULT;
11985                 }
11986             }
11987             n = lock_user_string(arg2);
11988             if (n) {
11989                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11990             } else {
11991                 ret = -TARGET_EFAULT;
11992             }
11993             unlock_user(n, arg2, 0);
11994             unlock_user(v, arg3, 0);
11995         }
11996         return ret;
11997     case TARGET_NR_getxattr:
11998     case TARGET_NR_lgetxattr:
11999         {
12000             void *p, *n, *v = 0;
12001             if (arg3) {
12002                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12003                 if (!v) {
12004                     return -TARGET_EFAULT;
12005                 }
12006             }
12007             p = lock_user_string(arg1);
12008             n = lock_user_string(arg2);
12009             if (p && n) {
12010                 if (num == TARGET_NR_getxattr) {
12011                     ret = get_errno(getxattr(p, n, v, arg4));
12012                 } else {
12013                     ret = get_errno(lgetxattr(p, n, v, arg4));
12014                 }
12015             } else {
12016                 ret = -TARGET_EFAULT;
12017             }
12018             unlock_user(p, arg1, 0);
12019             unlock_user(n, arg2, 0);
12020             unlock_user(v, arg3, arg4);
12021         }
12022         return ret;
12023     case TARGET_NR_fgetxattr:
12024         {
12025             void *n, *v = 0;
12026             if (arg3) {
12027                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12028                 if (!v) {
12029                     return -TARGET_EFAULT;
12030                 }
12031             }
12032             n = lock_user_string(arg2);
12033             if (n) {
12034                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12035             } else {
12036                 ret = -TARGET_EFAULT;
12037             }
12038             unlock_user(n, arg2, 0);
12039             unlock_user(v, arg3, arg4);
12040         }
12041         return ret;
12042     case TARGET_NR_removexattr:
12043     case TARGET_NR_lremovexattr:
12044         {
12045             void *p, *n;
12046             p = lock_user_string(arg1);
12047             n = lock_user_string(arg2);
12048             if (p && n) {
12049                 if (num == TARGET_NR_removexattr) {
12050                     ret = get_errno(removexattr(p, n));
12051                 } else {
12052                     ret = get_errno(lremovexattr(p, n));
12053                 }
12054             } else {
12055                 ret = -TARGET_EFAULT;
12056             }
12057             unlock_user(p, arg1, 0);
12058             unlock_user(n, arg2, 0);
12059         }
12060         return ret;
12061     case TARGET_NR_fremovexattr:
12062         {
12063             void *n;
12064             n = lock_user_string(arg2);
12065             if (n) {
12066                 ret = get_errno(fremovexattr(arg1, n));
12067             } else {
12068                 ret = -TARGET_EFAULT;
12069             }
12070             unlock_user(n, arg2, 0);
12071         }
12072         return ret;
12073 #endif
12074 #endif /* CONFIG_ATTR */
12075 #ifdef TARGET_NR_set_thread_area
12076     case TARGET_NR_set_thread_area:
12077 #if defined(TARGET_MIPS)
12078       cpu_env->active_tc.CP0_UserLocal = arg1;
12079       return 0;
12080 #elif defined(TARGET_CRIS)
12081       if (arg1 & 0xff)
12082           ret = -TARGET_EINVAL;
12083       else {
12084           cpu_env->pregs[PR_PID] = arg1;
12085           ret = 0;
12086       }
12087       return ret;
12088 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12089       return do_set_thread_area(cpu_env, arg1);
12090 #elif defined(TARGET_M68K)
12091       {
12092           TaskState *ts = cpu->opaque;
12093           ts->tp_value = arg1;
12094           return 0;
12095       }
12096 #else
12097       return -TARGET_ENOSYS;
12098 #endif
12099 #endif
12100 #ifdef TARGET_NR_get_thread_area
12101     case TARGET_NR_get_thread_area:
12102 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12103         return do_get_thread_area(cpu_env, arg1);
12104 #elif defined(TARGET_M68K)
12105         {
12106             TaskState *ts = cpu->opaque;
12107             return ts->tp_value;
12108         }
12109 #else
12110         return -TARGET_ENOSYS;
12111 #endif
12112 #endif
12113 #ifdef TARGET_NR_getdomainname
12114     case TARGET_NR_getdomainname:
12115         return -TARGET_ENOSYS;
12116 #endif
12117 
12118 #ifdef TARGET_NR_clock_settime
12119     case TARGET_NR_clock_settime:
12120     {
12121         struct timespec ts;
12122 
12123         ret = target_to_host_timespec(&ts, arg2);
12124         if (!is_error(ret)) {
12125             ret = get_errno(clock_settime(arg1, &ts));
12126         }
12127         return ret;
12128     }
12129 #endif
12130 #ifdef TARGET_NR_clock_settime64
12131     case TARGET_NR_clock_settime64:
12132     {
12133         struct timespec ts;
12134 
12135         ret = target_to_host_timespec64(&ts, arg2);
12136         if (!is_error(ret)) {
12137             ret = get_errno(clock_settime(arg1, &ts));
12138         }
12139         return ret;
12140     }
12141 #endif
12142 #ifdef TARGET_NR_clock_gettime
12143     case TARGET_NR_clock_gettime:
12144     {
12145         struct timespec ts;
12146         ret = get_errno(clock_gettime(arg1, &ts));
12147         if (!is_error(ret)) {
12148             ret = host_to_target_timespec(arg2, &ts);
12149         }
12150         return ret;
12151     }
12152 #endif
12153 #ifdef TARGET_NR_clock_gettime64
12154     case TARGET_NR_clock_gettime64:
12155     {
12156         struct timespec ts;
12157         ret = get_errno(clock_gettime(arg1, &ts));
12158         if (!is_error(ret)) {
12159             ret = host_to_target_timespec64(arg2, &ts);
12160         }
12161         return ret;
12162     }
12163 #endif
12164 #ifdef TARGET_NR_clock_getres
12165     case TARGET_NR_clock_getres:
12166     {
12167         struct timespec ts;
12168         ret = get_errno(clock_getres(arg1, &ts));
12169         if (!is_error(ret)) {
12170             host_to_target_timespec(arg2, &ts);
12171         }
12172         return ret;
12173     }
12174 #endif
12175 #ifdef TARGET_NR_clock_getres_time64
12176     case TARGET_NR_clock_getres_time64:
12177     {
12178         struct timespec ts;
12179         ret = get_errno(clock_getres(arg1, &ts));
12180         if (!is_error(ret)) {
12181             host_to_target_timespec64(arg2, &ts);
12182         }
12183         return ret;
12184     }
12185 #endif
12186 #ifdef TARGET_NR_clock_nanosleep
12187     case TARGET_NR_clock_nanosleep:
12188     {
12189         struct timespec ts;
12190         if (target_to_host_timespec(&ts, arg3)) {
12191             return -TARGET_EFAULT;
12192         }
12193         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12194                                              &ts, arg4 ? &ts : NULL));
12195         /*
12196          * if the call is interrupted by a signal handler, it fails
12197          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12198          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12199          */
12200         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12201             host_to_target_timespec(arg4, &ts)) {
12202               return -TARGET_EFAULT;
12203         }
12204 
12205         return ret;
12206     }
12207 #endif
12208 #ifdef TARGET_NR_clock_nanosleep_time64
12209     case TARGET_NR_clock_nanosleep_time64:
12210     {
12211         struct timespec ts;
12212 
12213         if (target_to_host_timespec64(&ts, arg3)) {
12214             return -TARGET_EFAULT;
12215         }
12216 
12217         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12218                                              &ts, arg4 ? &ts : NULL));
12219 
12220         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12221             host_to_target_timespec64(arg4, &ts)) {
12222             return -TARGET_EFAULT;
12223         }
12224         return ret;
12225     }
12226 #endif
12227 
12228 #if defined(TARGET_NR_set_tid_address)
12229     case TARGET_NR_set_tid_address:
12230     {
12231         TaskState *ts = cpu->opaque;
12232         ts->child_tidptr = arg1;
12233         /* do not call host set_tid_address() syscall, instead return tid() */
12234         return get_errno(sys_gettid());
12235     }
12236 #endif
12237 
12238     case TARGET_NR_tkill:
12239         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12240 
12241     case TARGET_NR_tgkill:
12242         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12243                          target_to_host_signal(arg3)));
12244 
12245 #ifdef TARGET_NR_set_robust_list
12246     case TARGET_NR_set_robust_list:
12247     case TARGET_NR_get_robust_list:
12248         /* The ABI for supporting robust futexes has userspace pass
12249          * the kernel a pointer to a linked list which is updated by
12250          * userspace after the syscall; the list is walked by the kernel
12251          * when the thread exits. Since the linked list in QEMU guest
12252          * memory isn't a valid linked list for the host and we have
12253          * no way to reliably intercept the thread-death event, we can't
12254          * support these. Silently return ENOSYS so that guest userspace
12255          * falls back to a non-robust futex implementation (which should
12256          * be OK except in the corner case of the guest crashing while
12257          * holding a mutex that is shared with another process via
12258          * shared memory).
12259          */
12260         return -TARGET_ENOSYS;
12261 #endif
12262 
12263 #if defined(TARGET_NR_utimensat)
12264     case TARGET_NR_utimensat:
12265         {
12266             struct timespec *tsp, ts[2];
12267             if (!arg3) {
12268                 tsp = NULL;
12269             } else {
12270                 if (target_to_host_timespec(ts, arg3)) {
12271                     return -TARGET_EFAULT;
12272                 }
12273                 if (target_to_host_timespec(ts + 1, arg3 +
12274                                             sizeof(struct target_timespec))) {
12275                     return -TARGET_EFAULT;
12276                 }
12277                 tsp = ts;
12278             }
12279             if (!arg2)
12280                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12281             else {
12282                 if (!(p = lock_user_string(arg2))) {
12283                     return -TARGET_EFAULT;
12284                 }
12285                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12286                 unlock_user(p, arg2, 0);
12287             }
12288         }
12289         return ret;
12290 #endif
12291 #ifdef TARGET_NR_utimensat_time64
12292     case TARGET_NR_utimensat_time64:
12293         {
12294             struct timespec *tsp, ts[2];
12295             if (!arg3) {
12296                 tsp = NULL;
12297             } else {
12298                 if (target_to_host_timespec64(ts, arg3)) {
12299                     return -TARGET_EFAULT;
12300                 }
12301                 if (target_to_host_timespec64(ts + 1, arg3 +
12302                                      sizeof(struct target__kernel_timespec))) {
12303                     return -TARGET_EFAULT;
12304                 }
12305                 tsp = ts;
12306             }
12307             if (!arg2)
12308                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12309             else {
12310                 p = lock_user_string(arg2);
12311                 if (!p) {
12312                     return -TARGET_EFAULT;
12313                 }
12314                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12315                 unlock_user(p, arg2, 0);
12316             }
12317         }
12318         return ret;
12319 #endif
12320 #ifdef TARGET_NR_futex
12321     case TARGET_NR_futex:
12322         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12323 #endif
12324 #ifdef TARGET_NR_futex_time64
12325     case TARGET_NR_futex_time64:
12326         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12327 #endif
12328 #ifdef CONFIG_INOTIFY
12329 #if defined(TARGET_NR_inotify_init)
12330     case TARGET_NR_inotify_init:
12331         ret = get_errno(inotify_init());
12332         if (ret >= 0) {
12333             fd_trans_register(ret, &target_inotify_trans);
12334         }
12335         return ret;
12336 #endif
12337 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12338     case TARGET_NR_inotify_init1:
12339         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12340                                           fcntl_flags_tbl)));
12341         if (ret >= 0) {
12342             fd_trans_register(ret, &target_inotify_trans);
12343         }
12344         return ret;
12345 #endif
12346 #if defined(TARGET_NR_inotify_add_watch)
12347     case TARGET_NR_inotify_add_watch:
12348         p = lock_user_string(arg2);
12349         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12350         unlock_user(p, arg2, 0);
12351         return ret;
12352 #endif
12353 #if defined(TARGET_NR_inotify_rm_watch)
12354     case TARGET_NR_inotify_rm_watch:
12355         return get_errno(inotify_rm_watch(arg1, arg2));
12356 #endif
12357 #endif
12358 
12359 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12360     case TARGET_NR_mq_open:
12361         {
12362             struct mq_attr posix_mq_attr;
12363             struct mq_attr *pposix_mq_attr;
12364             int host_flags;
12365 
12366             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12367             pposix_mq_attr = NULL;
12368             if (arg4) {
12369                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12370                     return -TARGET_EFAULT;
12371                 }
12372                 pposix_mq_attr = &posix_mq_attr;
12373             }
12374             p = lock_user_string(arg1 - 1);
12375             if (!p) {
12376                 return -TARGET_EFAULT;
12377             }
12378             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12379             unlock_user (p, arg1, 0);
12380         }
12381         return ret;
12382 
12383     case TARGET_NR_mq_unlink:
12384         p = lock_user_string(arg1 - 1);
12385         if (!p) {
12386             return -TARGET_EFAULT;
12387         }
12388         ret = get_errno(mq_unlink(p));
12389         unlock_user (p, arg1, 0);
12390         return ret;
12391 
12392 #ifdef TARGET_NR_mq_timedsend
12393     case TARGET_NR_mq_timedsend:
12394         {
12395             struct timespec ts;
12396 
12397             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12398             if (arg5 != 0) {
12399                 if (target_to_host_timespec(&ts, arg5)) {
12400                     return -TARGET_EFAULT;
12401                 }
12402                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12403                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12404                     return -TARGET_EFAULT;
12405                 }
12406             } else {
12407                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12408             }
12409             unlock_user (p, arg2, arg3);
12410         }
12411         return ret;
12412 #endif
12413 #ifdef TARGET_NR_mq_timedsend_time64
12414     case TARGET_NR_mq_timedsend_time64:
12415         {
12416             struct timespec ts;
12417 
12418             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12419             if (arg5 != 0) {
12420                 if (target_to_host_timespec64(&ts, arg5)) {
12421                     return -TARGET_EFAULT;
12422                 }
12423                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12424                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12425                     return -TARGET_EFAULT;
12426                 }
12427             } else {
12428                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12429             }
12430             unlock_user(p, arg2, arg3);
12431         }
12432         return ret;
12433 #endif
12434 
12435 #ifdef TARGET_NR_mq_timedreceive
12436     case TARGET_NR_mq_timedreceive:
12437         {
12438             struct timespec ts;
12439             unsigned int prio;
12440 
12441             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12442             if (arg5 != 0) {
12443                 if (target_to_host_timespec(&ts, arg5)) {
12444                     return -TARGET_EFAULT;
12445                 }
12446                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12447                                                      &prio, &ts));
12448                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12449                     return -TARGET_EFAULT;
12450                 }
12451             } else {
12452                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12453                                                      &prio, NULL));
12454             }
12455             unlock_user (p, arg2, arg3);
12456             if (arg4 != 0)
12457                 put_user_u32(prio, arg4);
12458         }
12459         return ret;
12460 #endif
12461 #ifdef TARGET_NR_mq_timedreceive_time64
12462     case TARGET_NR_mq_timedreceive_time64:
12463         {
12464             struct timespec ts;
12465             unsigned int prio;
12466 
12467             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12468             if (arg5 != 0) {
12469                 if (target_to_host_timespec64(&ts, arg5)) {
12470                     return -TARGET_EFAULT;
12471                 }
12472                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12473                                                      &prio, &ts));
12474                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12475                     return -TARGET_EFAULT;
12476                 }
12477             } else {
12478                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12479                                                      &prio, NULL));
12480             }
12481             unlock_user(p, arg2, arg3);
12482             if (arg4 != 0) {
12483                 put_user_u32(prio, arg4);
12484             }
12485         }
12486         return ret;
12487 #endif
12488 
12489     /* Not implemented for now... */
12490 /*     case TARGET_NR_mq_notify: */
12491 /*         break; */
12492 
12493     case TARGET_NR_mq_getsetattr:
12494         {
12495             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12496             ret = 0;
12497             if (arg2 != 0) {
12498                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12499                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12500                                            &posix_mq_attr_out));
12501             } else if (arg3 != 0) {
12502                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12503             }
12504             if (ret == 0 && arg3 != 0) {
12505                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12506             }
12507         }
12508         return ret;
12509 #endif
12510 
12511 #ifdef CONFIG_SPLICE
12512 #ifdef TARGET_NR_tee
12513     case TARGET_NR_tee:
12514         {
12515             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12516         }
12517         return ret;
12518 #endif
12519 #ifdef TARGET_NR_splice
12520     case TARGET_NR_splice:
12521         {
12522             loff_t loff_in, loff_out;
12523             loff_t *ploff_in = NULL, *ploff_out = NULL;
12524             if (arg2) {
12525                 if (get_user_u64(loff_in, arg2)) {
12526                     return -TARGET_EFAULT;
12527                 }
12528                 ploff_in = &loff_in;
12529             }
12530             if (arg4) {
12531                 if (get_user_u64(loff_out, arg4)) {
12532                     return -TARGET_EFAULT;
12533                 }
12534                 ploff_out = &loff_out;
12535             }
12536             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12537             if (arg2) {
12538                 if (put_user_u64(loff_in, arg2)) {
12539                     return -TARGET_EFAULT;
12540                 }
12541             }
12542             if (arg4) {
12543                 if (put_user_u64(loff_out, arg4)) {
12544                     return -TARGET_EFAULT;
12545                 }
12546             }
12547         }
12548         return ret;
12549 #endif
12550 #ifdef TARGET_NR_vmsplice
12551 	case TARGET_NR_vmsplice:
12552         {
12553             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12554             if (vec != NULL) {
12555                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12556                 unlock_iovec(vec, arg2, arg3, 0);
12557             } else {
12558                 ret = -host_to_target_errno(errno);
12559             }
12560         }
12561         return ret;
12562 #endif
12563 #endif /* CONFIG_SPLICE */
12564 #ifdef CONFIG_EVENTFD
12565 #if defined(TARGET_NR_eventfd)
12566     case TARGET_NR_eventfd:
12567         ret = get_errno(eventfd(arg1, 0));
12568         if (ret >= 0) {
12569             fd_trans_register(ret, &target_eventfd_trans);
12570         }
12571         return ret;
12572 #endif
12573 #if defined(TARGET_NR_eventfd2)
12574     case TARGET_NR_eventfd2:
12575     {
12576         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12577         if (arg2 & TARGET_O_NONBLOCK) {
12578             host_flags |= O_NONBLOCK;
12579         }
12580         if (arg2 & TARGET_O_CLOEXEC) {
12581             host_flags |= O_CLOEXEC;
12582         }
12583         ret = get_errno(eventfd(arg1, host_flags));
12584         if (ret >= 0) {
12585             fd_trans_register(ret, &target_eventfd_trans);
12586         }
12587         return ret;
12588     }
12589 #endif
12590 #endif /* CONFIG_EVENTFD  */
12591 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12592     case TARGET_NR_fallocate:
12593 #if TARGET_ABI_BITS == 32
12594         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12595                                   target_offset64(arg5, arg6)));
12596 #else
12597         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12598 #endif
12599         return ret;
12600 #endif
12601 #if defined(CONFIG_SYNC_FILE_RANGE)
12602 #if defined(TARGET_NR_sync_file_range)
12603     case TARGET_NR_sync_file_range:
12604 #if TARGET_ABI_BITS == 32
12605 #if defined(TARGET_MIPS)
12606         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12607                                         target_offset64(arg5, arg6), arg7));
12608 #else
12609         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12610                                         target_offset64(arg4, arg5), arg6));
12611 #endif /* !TARGET_MIPS */
12612 #else
12613         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12614 #endif
12615         return ret;
12616 #endif
12617 #if defined(TARGET_NR_sync_file_range2) || \
12618     defined(TARGET_NR_arm_sync_file_range)
12619 #if defined(TARGET_NR_sync_file_range2)
12620     case TARGET_NR_sync_file_range2:
12621 #endif
12622 #if defined(TARGET_NR_arm_sync_file_range)
12623     case TARGET_NR_arm_sync_file_range:
12624 #endif
12625         /* This is like sync_file_range but the arguments are reordered */
12626 #if TARGET_ABI_BITS == 32
12627         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12628                                         target_offset64(arg5, arg6), arg2));
12629 #else
12630         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12631 #endif
12632         return ret;
12633 #endif
12634 #endif
12635 #if defined(TARGET_NR_signalfd4)
12636     case TARGET_NR_signalfd4:
12637         return do_signalfd4(arg1, arg2, arg4);
12638 #endif
12639 #if defined(TARGET_NR_signalfd)
12640     case TARGET_NR_signalfd:
12641         return do_signalfd4(arg1, arg2, 0);
12642 #endif
12643 #if defined(CONFIG_EPOLL)
12644 #if defined(TARGET_NR_epoll_create)
12645     case TARGET_NR_epoll_create:
12646         return get_errno(epoll_create(arg1));
12647 #endif
12648 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12649     case TARGET_NR_epoll_create1:
12650         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12651 #endif
12652 #if defined(TARGET_NR_epoll_ctl)
12653     case TARGET_NR_epoll_ctl:
12654     {
12655         struct epoll_event ep;
12656         struct epoll_event *epp = 0;
12657         if (arg4) {
12658             if (arg2 != EPOLL_CTL_DEL) {
12659                 struct target_epoll_event *target_ep;
12660                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12661                     return -TARGET_EFAULT;
12662                 }
12663                 ep.events = tswap32(target_ep->events);
12664                 /*
12665                  * The epoll_data_t union is just opaque data to the kernel,
12666                  * so we transfer all 64 bits across and need not worry what
12667                  * actual data type it is.
12668                  */
12669                 ep.data.u64 = tswap64(target_ep->data.u64);
12670                 unlock_user_struct(target_ep, arg4, 0);
12671             }
12672             /*
12673              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12674              * non-null pointer, even though this argument is ignored.
12675              *
12676              */
12677             epp = &ep;
12678         }
12679         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12680     }
12681 #endif
12682 
12683 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12684 #if defined(TARGET_NR_epoll_wait)
12685     case TARGET_NR_epoll_wait:
12686 #endif
12687 #if defined(TARGET_NR_epoll_pwait)
12688     case TARGET_NR_epoll_pwait:
12689 #endif
12690     {
12691         struct target_epoll_event *target_ep;
12692         struct epoll_event *ep;
12693         int epfd = arg1;
12694         int maxevents = arg3;
12695         int timeout = arg4;
12696 
12697         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12698             return -TARGET_EINVAL;
12699         }
12700 
12701         target_ep = lock_user(VERIFY_WRITE, arg2,
12702                               maxevents * sizeof(struct target_epoll_event), 1);
12703         if (!target_ep) {
12704             return -TARGET_EFAULT;
12705         }
12706 
12707         ep = g_try_new(struct epoll_event, maxevents);
12708         if (!ep) {
12709             unlock_user(target_ep, arg2, 0);
12710             return -TARGET_ENOMEM;
12711         }
12712 
12713         switch (num) {
12714 #if defined(TARGET_NR_epoll_pwait)
12715         case TARGET_NR_epoll_pwait:
12716         {
12717             sigset_t *set = NULL;
12718 
12719             if (arg5) {
12720                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12721                 if (ret != 0) {
12722                     break;
12723                 }
12724             }
12725 
12726             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12727                                              set, SIGSET_T_SIZE));
12728 
12729             if (set) {
12730                 finish_sigsuspend_mask(ret);
12731             }
12732             break;
12733         }
12734 #endif
12735 #if defined(TARGET_NR_epoll_wait)
12736         case TARGET_NR_epoll_wait:
12737             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12738                                              NULL, 0));
12739             break;
12740 #endif
12741         default:
12742             ret = -TARGET_ENOSYS;
12743         }
12744         if (!is_error(ret)) {
12745             int i;
12746             for (i = 0; i < ret; i++) {
12747                 target_ep[i].events = tswap32(ep[i].events);
12748                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12749             }
12750             unlock_user(target_ep, arg2,
12751                         ret * sizeof(struct target_epoll_event));
12752         } else {
12753             unlock_user(target_ep, arg2, 0);
12754         }
12755         g_free(ep);
12756         return ret;
12757     }
12758 #endif
12759 #endif
12760 #ifdef TARGET_NR_prlimit64
12761     case TARGET_NR_prlimit64:
12762     {
12763         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12764         struct target_rlimit64 *target_rnew, *target_rold;
12765         struct host_rlimit64 rnew, rold, *rnewp = 0;
12766         int resource = target_to_host_resource(arg2);
12767 
12768         if (arg3 && (resource != RLIMIT_AS &&
12769                      resource != RLIMIT_DATA &&
12770                      resource != RLIMIT_STACK)) {
12771             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12772                 return -TARGET_EFAULT;
12773             }
12774             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12775             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12776             unlock_user_struct(target_rnew, arg3, 0);
12777             rnewp = &rnew;
12778         }
12779 
12780         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12781         if (!is_error(ret) && arg4) {
12782             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12783                 return -TARGET_EFAULT;
12784             }
12785             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12786             target_rold->rlim_max = tswap64(rold.rlim_max);
12787             unlock_user_struct(target_rold, arg4, 1);
12788         }
12789         return ret;
12790     }
12791 #endif
12792 #ifdef TARGET_NR_gethostname
12793     case TARGET_NR_gethostname:
12794     {
12795         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12796         if (name) {
12797             ret = get_errno(gethostname(name, arg2));
12798             unlock_user(name, arg1, arg2);
12799         } else {
12800             ret = -TARGET_EFAULT;
12801         }
12802         return ret;
12803     }
12804 #endif
12805 #ifdef TARGET_NR_atomic_cmpxchg_32
12806     case TARGET_NR_atomic_cmpxchg_32:
12807     {
12808         /* should use start_exclusive from main.c */
12809         abi_ulong mem_value;
12810         if (get_user_u32(mem_value, arg6)) {
12811             target_siginfo_t info;
12812             info.si_signo = SIGSEGV;
12813             info.si_errno = 0;
12814             info.si_code = TARGET_SEGV_MAPERR;
12815             info._sifields._sigfault._addr = arg6;
12816             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12817             ret = 0xdeadbeef;
12818 
12819         }
12820         if (mem_value == arg2)
12821             put_user_u32(arg1, arg6);
12822         return mem_value;
12823     }
12824 #endif
12825 #ifdef TARGET_NR_atomic_barrier
12826     case TARGET_NR_atomic_barrier:
12827         /* Like the kernel implementation and the
12828            qemu arm barrier, no-op this? */
12829         return 0;
12830 #endif
12831 
12832 #ifdef TARGET_NR_timer_create
12833     case TARGET_NR_timer_create:
12834     {
12835         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12836 
12837         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12838 
12839         int clkid = arg1;
12840         int timer_index = next_free_host_timer();
12841 
12842         if (timer_index < 0) {
12843             ret = -TARGET_EAGAIN;
12844         } else {
12845             timer_t *phtimer = g_posix_timers  + timer_index;
12846 
12847             if (arg2) {
12848                 phost_sevp = &host_sevp;
12849                 ret = target_to_host_sigevent(phost_sevp, arg2);
12850                 if (ret != 0) {
12851                     return ret;
12852                 }
12853             }
12854 
12855             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12856             if (ret) {
12857                 phtimer = NULL;
12858             } else {
12859                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12860                     return -TARGET_EFAULT;
12861                 }
12862             }
12863         }
12864         return ret;
12865     }
12866 #endif
12867 
12868 #ifdef TARGET_NR_timer_settime
12869     case TARGET_NR_timer_settime:
12870     {
12871         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12872          * struct itimerspec * old_value */
12873         target_timer_t timerid = get_timer_id(arg1);
12874 
12875         if (timerid < 0) {
12876             ret = timerid;
12877         } else if (arg3 == 0) {
12878             ret = -TARGET_EINVAL;
12879         } else {
12880             timer_t htimer = g_posix_timers[timerid];
12881             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12882 
12883             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12884                 return -TARGET_EFAULT;
12885             }
12886             ret = get_errno(
12887                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12888             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12889                 return -TARGET_EFAULT;
12890             }
12891         }
12892         return ret;
12893     }
12894 #endif
12895 
12896 #ifdef TARGET_NR_timer_settime64
12897     case TARGET_NR_timer_settime64:
12898     {
12899         target_timer_t timerid = get_timer_id(arg1);
12900 
12901         if (timerid < 0) {
12902             ret = timerid;
12903         } else if (arg3 == 0) {
12904             ret = -TARGET_EINVAL;
12905         } else {
12906             timer_t htimer = g_posix_timers[timerid];
12907             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12908 
12909             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12910                 return -TARGET_EFAULT;
12911             }
12912             ret = get_errno(
12913                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12914             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12915                 return -TARGET_EFAULT;
12916             }
12917         }
12918         return ret;
12919     }
12920 #endif
12921 
12922 #ifdef TARGET_NR_timer_gettime
12923     case TARGET_NR_timer_gettime:
12924     {
12925         /* args: timer_t timerid, struct itimerspec *curr_value */
12926         target_timer_t timerid = get_timer_id(arg1);
12927 
12928         if (timerid < 0) {
12929             ret = timerid;
12930         } else if (!arg2) {
12931             ret = -TARGET_EFAULT;
12932         } else {
12933             timer_t htimer = g_posix_timers[timerid];
12934             struct itimerspec hspec;
12935             ret = get_errno(timer_gettime(htimer, &hspec));
12936 
12937             if (host_to_target_itimerspec(arg2, &hspec)) {
12938                 ret = -TARGET_EFAULT;
12939             }
12940         }
12941         return ret;
12942     }
12943 #endif
12944 
12945 #ifdef TARGET_NR_timer_gettime64
12946     case TARGET_NR_timer_gettime64:
12947     {
12948         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12949         target_timer_t timerid = get_timer_id(arg1);
12950 
12951         if (timerid < 0) {
12952             ret = timerid;
12953         } else if (!arg2) {
12954             ret = -TARGET_EFAULT;
12955         } else {
12956             timer_t htimer = g_posix_timers[timerid];
12957             struct itimerspec hspec;
12958             ret = get_errno(timer_gettime(htimer, &hspec));
12959 
12960             if (host_to_target_itimerspec64(arg2, &hspec)) {
12961                 ret = -TARGET_EFAULT;
12962             }
12963         }
12964         return ret;
12965     }
12966 #endif
12967 
12968 #ifdef TARGET_NR_timer_getoverrun
12969     case TARGET_NR_timer_getoverrun:
12970     {
12971         /* args: timer_t timerid */
12972         target_timer_t timerid = get_timer_id(arg1);
12973 
12974         if (timerid < 0) {
12975             ret = timerid;
12976         } else {
12977             timer_t htimer = g_posix_timers[timerid];
12978             ret = get_errno(timer_getoverrun(htimer));
12979         }
12980         return ret;
12981     }
12982 #endif
12983 
12984 #ifdef TARGET_NR_timer_delete
12985     case TARGET_NR_timer_delete:
12986     {
12987         /* args: timer_t timerid */
12988         target_timer_t timerid = get_timer_id(arg1);
12989 
12990         if (timerid < 0) {
12991             ret = timerid;
12992         } else {
12993             timer_t htimer = g_posix_timers[timerid];
12994             ret = get_errno(timer_delete(htimer));
12995             g_posix_timers[timerid] = 0;
12996         }
12997         return ret;
12998     }
12999 #endif
13000 
13001 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13002     case TARGET_NR_timerfd_create:
13003         return get_errno(timerfd_create(arg1,
13004                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13005 #endif
13006 
13007 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13008     case TARGET_NR_timerfd_gettime:
13009         {
13010             struct itimerspec its_curr;
13011 
13012             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13013 
13014             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13015                 return -TARGET_EFAULT;
13016             }
13017         }
13018         return ret;
13019 #endif
13020 
13021 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13022     case TARGET_NR_timerfd_gettime64:
13023         {
13024             struct itimerspec its_curr;
13025 
13026             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13027 
13028             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13029                 return -TARGET_EFAULT;
13030             }
13031         }
13032         return ret;
13033 #endif
13034 
13035 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13036     case TARGET_NR_timerfd_settime:
13037         {
13038             struct itimerspec its_new, its_old, *p_new;
13039 
13040             if (arg3) {
13041                 if (target_to_host_itimerspec(&its_new, arg3)) {
13042                     return -TARGET_EFAULT;
13043                 }
13044                 p_new = &its_new;
13045             } else {
13046                 p_new = NULL;
13047             }
13048 
13049             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13050 
13051             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13052                 return -TARGET_EFAULT;
13053             }
13054         }
13055         return ret;
13056 #endif
13057 
13058 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13059     case TARGET_NR_timerfd_settime64:
13060         {
13061             struct itimerspec its_new, its_old, *p_new;
13062 
13063             if (arg3) {
13064                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13065                     return -TARGET_EFAULT;
13066                 }
13067                 p_new = &its_new;
13068             } else {
13069                 p_new = NULL;
13070             }
13071 
13072             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13073 
13074             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13075                 return -TARGET_EFAULT;
13076             }
13077         }
13078         return ret;
13079 #endif
13080 
13081 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13082     case TARGET_NR_ioprio_get:
13083         return get_errno(ioprio_get(arg1, arg2));
13084 #endif
13085 
13086 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13087     case TARGET_NR_ioprio_set:
13088         return get_errno(ioprio_set(arg1, arg2, arg3));
13089 #endif
13090 
13091 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13092     case TARGET_NR_setns:
13093         return get_errno(setns(arg1, arg2));
13094 #endif
13095 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13096     case TARGET_NR_unshare:
13097         return get_errno(unshare(arg1));
13098 #endif
13099 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13100     case TARGET_NR_kcmp:
13101         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13102 #endif
13103 #ifdef TARGET_NR_swapcontext
13104     case TARGET_NR_swapcontext:
13105         /* PowerPC specific.  */
13106         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13107 #endif
13108 #ifdef TARGET_NR_memfd_create
13109     case TARGET_NR_memfd_create:
13110         p = lock_user_string(arg1);
13111         if (!p) {
13112             return -TARGET_EFAULT;
13113         }
13114         ret = get_errno(memfd_create(p, arg2));
13115         fd_trans_unregister(ret);
13116         unlock_user(p, arg1, 0);
13117         return ret;
13118 #endif
13119 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13120     case TARGET_NR_membarrier:
13121         return get_errno(membarrier(arg1, arg2));
13122 #endif
13123 
13124 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13125     case TARGET_NR_copy_file_range:
13126         {
13127             loff_t inoff, outoff;
13128             loff_t *pinoff = NULL, *poutoff = NULL;
13129 
13130             if (arg2) {
13131                 if (get_user_u64(inoff, arg2)) {
13132                     return -TARGET_EFAULT;
13133                 }
13134                 pinoff = &inoff;
13135             }
13136             if (arg4) {
13137                 if (get_user_u64(outoff, arg4)) {
13138                     return -TARGET_EFAULT;
13139                 }
13140                 poutoff = &outoff;
13141             }
13142             /* Do not sign-extend the count parameter. */
13143             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13144                                                  (abi_ulong)arg5, arg6));
13145             if (!is_error(ret) && ret > 0) {
13146                 if (arg2) {
13147                     if (put_user_u64(inoff, arg2)) {
13148                         return -TARGET_EFAULT;
13149                     }
13150                 }
13151                 if (arg4) {
13152                     if (put_user_u64(outoff, arg4)) {
13153                         return -TARGET_EFAULT;
13154                     }
13155                 }
13156             }
13157         }
13158         return ret;
13159 #endif
13160 
13161 #if defined(TARGET_NR_pivot_root)
13162     case TARGET_NR_pivot_root:
13163         {
13164             void *p2;
13165             p = lock_user_string(arg1); /* new_root */
13166             p2 = lock_user_string(arg2); /* put_old */
13167             if (!p || !p2) {
13168                 ret = -TARGET_EFAULT;
13169             } else {
13170                 ret = get_errno(pivot_root(p, p2));
13171             }
13172             unlock_user(p2, arg2, 0);
13173             unlock_user(p, arg1, 0);
13174         }
13175         return ret;
13176 #endif
13177 
13178     default:
13179         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13180         return -TARGET_ENOSYS;
13181     }
13182     return ret;
13183 }
13184 
13185 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13186                     abi_long arg2, abi_long arg3, abi_long arg4,
13187                     abi_long arg5, abi_long arg6, abi_long arg7,
13188                     abi_long arg8)
13189 {
13190     CPUState *cpu = env_cpu(cpu_env);
13191     abi_long ret;
13192 
13193 #ifdef DEBUG_ERESTARTSYS
13194     /* Debug-only code for exercising the syscall-restart code paths
13195      * in the per-architecture cpu main loops: restart every syscall
13196      * the guest makes once before letting it through.
13197      */
13198     {
13199         static bool flag;
13200         flag = !flag;
13201         if (flag) {
13202             return -QEMU_ERESTARTSYS;
13203         }
13204     }
13205 #endif
13206 
13207     record_syscall_start(cpu, num, arg1,
13208                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13209 
13210     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13211         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13212     }
13213 
13214     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13215                       arg5, arg6, arg7, arg8);
13216 
13217     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13218         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13219                           arg3, arg4, arg5, arg6);
13220     }
13221 
13222     record_syscall_return(cpu, num, ret);
13223     return ret;
13224 }
13225