xref: /openbmc/qemu/linux-user/syscall.c (revision dfe49864)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85 
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92 
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130 
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
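/*
 * A minimal illustration of how these masks are meant to be read (the flag
 * values come from <sched.h>; the pthread_create() flag list reflects what
 * glibc typically passes and is not taken from this file): a guest thread
 * creation arrives with something like
 *
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 *
 * i.e. all of CLONE_THREAD_FLAGS plus only "optional" bits, while a plain
 * fork() arrives as little more than SIGCHLD in the CSIGNAL byte.  A
 * classification along the lines of
 *
 *   bool looks_like_thread = (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS
 *                            && !(flags & CLONE_INVALID_THREAD_FLAGS);
 *   bool looks_like_fork   = !(flags & CLONE_THREAD_FLAGS)
 *                            && !(flags & CLONE_INVALID_FORK_FLAGS);
 *
 * is roughly what do_fork() does with these masks further down; anything
 * matching neither pattern is rejected with EINVAL.
 */
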
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
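/*
 * As an example of how the wrapper macros above are used: the declaration
 *
 *   _syscall3(int, sys_syslog, int, type, char *, bufp, int, len)
 *
 * found further down expands to
 *
 *   static int sys_syslog(int type, char *bufp, int len)
 *   {
 *       return syscall(__NR_sys_syslog, type, bufp, len);
 *   }
 *
 * and, because __NR_sys_syslog is #defined just below to the host's
 * __NR_syslog, the wrapper issues the raw host syscall directly rather than
 * going through any libc wrapper (which may not exist, or may have different
 * semantics, for some of these calls).
 */
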
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * In all other cases we use the host's getdents, when it is available.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
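/*
 * Concretely: on a 32-bit host, struct linux_dirent starts with two native
 * "long" members (d_ino and d_off, 4 bytes each), while a 64-bit guest
 * expects 8-byte fields in the converted record, so every record can grow by
 * up to 8 bytes during conversion.  If the host getdents() has filled the
 * guest-sized buffer completely, the widened records no longer fit.  Using
 * getdents64 on the host side (64-bit d_ino/d_off to begin with) avoids that
 * growth, which is why EMULATE_GETDENTS_WITH_GETDENTS is only defined when
 * the host's long is at least as wide as the guest ABI's.
 */
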
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363     uint32_t size;
364     uint32_t sched_policy;
365     uint64_t sched_flags;
366     int32_t sched_nice;
367     uint32_t sched_priority;
368     uint64_t sched_runtime;
369     uint64_t sched_deadline;
370     uint64_t sched_period;
371     uint32_t sched_util_min;
372     uint32_t sched_util_max;
373 };
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
425 static const bitmask_transtbl fcntl_flags_tbl[] = {
426   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
427   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
428   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
429   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
430   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
431   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
432   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
433   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
434   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
435   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
436   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
437   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
439 #if defined(O_DIRECT)
440   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
441 #endif
442 #if defined(O_NOATIME)
443   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
444 #endif
445 #if defined(O_CLOEXEC)
446   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
447 #endif
448 #if defined(O_PATH)
449   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
450 #endif
451 #if defined(O_TMPFILE)
452   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
453 #endif
454   /* Don't terminate the list prematurely on 64-bit host+guest.  */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457 #endif
458   { 0, 0, 0, 0 }
459 };
460 
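/*
 * Each row of the table maps a (target mask, target value) pair to the
 * corresponding (host mask, host value) pair.  A sketch of how it is
 * consumed when open(2)/fcntl(2) flags are translated, using the
 * target_to_host_bitmask()/host_to_target_bitmask() helpers from the
 * linux-user code (treat the exact call sites as illustrative, not as the
 * literal implementation):
 *
 *   int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *   int fd = get_errno(safe_openat(dirfd, path(pathname), host_flags, mode));
 *   ...
 *   int target_fl = host_to_target_bitmask(fcntl(fd, F_GETFL),
 *                                          fcntl_flags_tbl);
 */
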
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470                          const struct timespec times[2], int flags)
471 {
472     errno = ENOSYS;
473     return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485                          int newfd, const char *new, int flags)
486 {
487     if (flags == 0) {
488         return renameat(oldfd, old, newfd, new);
489     }
490     errno = ENOSYS;
491     return -1;
492 }
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not match the one used by the underlying syscall */
512 struct host_rlimit64 {
513     uint64_t rlim_cur;
514     uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
539 static inline void free_host_timer_slot(int id)
540 {
541     qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
543 #endif
544 
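/*
 * Sketch of the intended allocate/use/release pattern (illustrative only;
 * the real consumers are the timer_create()/timer_delete() emulation later
 * in this file, and the exact error handling there differs in detail):
 *
 *   int slot = next_free_host_timer();
 *   if (slot < 0) {
 *       return -TARGET_EAGAIN;                  // all 32 slots reserved
 *   }
 *   ret = get_errno(timer_create(clkid, &sevp, &g_posix_timers[slot]));
 *   if (is_error(ret)) {
 *       free_host_timer_slot(slot);             // undo the reservation
 *   }
 *   // otherwise the guest-visible timer id is derived from "slot", and
 *   // timer_delete tears down the host timer before releasing the slot.
 *
 * The qatomic_xchg() in next_free_host_timer() makes the reservation itself
 * atomic, so two guest threads racing in timer_create() cannot be handed the
 * same slot, and the store-release in free_host_timer_slot() keeps the slot
 * unavailable until the earlier teardown writes are visible.
 */
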
545 static inline int host_to_target_errno(int host_errno)
546 {
547     switch (host_errno) {
548 #define E(X)  case X: return TARGET_##X;
549 #include "errnos.c.inc"
550 #undef E
551     default:
552         return host_errno;
553     }
554 }
555 
556 static inline int target_to_host_errno(int target_errno)
557 {
558     switch (target_errno) {
559 #define E(X)  case TARGET_##X: return X;
560 #include "errnos.c.inc"
561 #undef E
562     default:
563         return target_errno;
564     }
565 }
566 
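/*
 * Both conversion functions above are generated with an "X macro": the
 * included errnos.c.inc is expected to be nothing but a list of entries of
 * the form
 *
 *   E(EPERM)
 *   E(ENOENT)
 *   ...
 *
 * so with the first definition of E() each entry expands to
 *
 *   case EPERM: return TARGET_EPERM;
 *
 * and with the second definition to the inverse mapping.  Any errno that is
 * not listed falls through to the default case and is passed through
 * numerically unchanged.
 */
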
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
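/*
 * Check the tail of a guest-supplied, size-versioned structure: the bytes
 * from ksize (the size this emulation understands) up to usize (the size the
 * guest passed in) must all be zero.  Returns 1 if they are (or if
 * usize <= ksize), 0 if a non-zero byte is found, and -TARGET_EFAULT if the
 * guest memory cannot be read.  This mirrors the kernel's check_zeroed_user()
 * logic used for forward-compatible syscall arguments such as clone3().
 */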
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
604 
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608     return safe_syscall(__NR_##name); \
609 }
610 
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614     return safe_syscall(__NR_##name, arg1); \
615 }
616 
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622 
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628 
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630     type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635 
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637     type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639     type5 arg5) \
640 { \
641     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643 
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645     type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647     type5 arg5, type6 arg6) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
651 
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655               int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658               struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661               int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664               char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672               struct timespec *, tsp, const sigset_t *, sigmask,
673               size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676               int, maxevents, int, timeout, const sigset_t *, sigmask,
677               size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680               const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684               const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693               unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695               unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697               socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707               const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710               int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713               struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716     defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718               const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723               void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726               void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731               int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735               long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739               unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742     defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744               size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747     defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749               size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753               int, outfd, loff_t *, poutoff, size_t, length,
754               unsigned int, flags)
755 #endif
756 
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758  * "third argument might be integer or pointer or not present" behaviour of
759  * the libc function.
760  */
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763  *  - pass the F_GETLK64 etc. constants rather than the unsuffixed F_GETLK
764  *  - use the flock64 struct rather than the unsuffixed flock
765  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
766  */
767 #ifdef __NR_fcntl64
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
769 #else
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
771 #endif
772 
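/*
 * Illustrative use of the rule above (assume "fd" is a host file descriptor
 * and the guest's lock description has already been converted):
 *
 *   struct flock64 fl64;
 *   ...
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * On 64-bit hosts there is no __NR_fcntl64, but there F_GETLK64 has the same
 * value as F_GETLK and struct flock64 has the same layout as struct flock,
 * so the one spelling works for both host types while always giving 64-bit
 * file offsets.
 */
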
773 static inline int host_to_target_sock_type(int host_type)
774 {
775     int target_type;
776 
777     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
778     case SOCK_DGRAM:
779         target_type = TARGET_SOCK_DGRAM;
780         break;
781     case SOCK_STREAM:
782         target_type = TARGET_SOCK_STREAM;
783         break;
784     default:
785         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
786         break;
787     }
788 
789 #if defined(SOCK_CLOEXEC)
790     if (host_type & SOCK_CLOEXEC) {
791         target_type |= TARGET_SOCK_CLOEXEC;
792     }
793 #endif
794 
795 #if defined(SOCK_NONBLOCK)
796     if (host_type & SOCK_NONBLOCK) {
797         target_type |= TARGET_SOCK_NONBLOCK;
798     }
799 #endif
800 
801     return target_type;
802 }
803 
804 static abi_ulong target_brk, initial_target_brk;
805 static abi_ulong brk_page;
806 
807 void target_set_brk(abi_ulong new_brk)
808 {
809     target_brk = TARGET_PAGE_ALIGN(new_brk);
810     initial_target_brk = target_brk;
811     brk_page = HOST_PAGE_ALIGN(target_brk);
812 }
813 
814 /* do_brk() must return target values and target errnos. */
815 abi_long do_brk(abi_ulong brk_val)
816 {
817     abi_long mapped_addr;
818     abi_ulong new_alloc_size;
819     abi_ulong new_brk, new_host_brk_page;
820 
821     /* brk pointers are always untagged */
822 
823     /* return old brk value if brk_val unchanged or zero */
824     if (!brk_val || brk_val == target_brk) {
825         return target_brk;
826     }
827 
828     /* do not allow the heap to shrink below the initial brk value */
829     if (brk_val < initial_target_brk) {
830         brk_val = initial_target_brk;
831     }
832 
833     new_brk = TARGET_PAGE_ALIGN(brk_val);
834     new_host_brk_page = HOST_PAGE_ALIGN(brk_val);
835 
836     /* brk_val and old target_brk might be on the same page */
837     if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
838         /* empty remaining bytes in (possibly larger) host page */
839         memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);
840         target_brk = brk_val;
841         return target_brk;
842     }
843 
844     /* Release heap if necessary */
845     if (new_brk < target_brk) {
846         /* empty remaining bytes in (possibly larger) host page */
847         memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);
848 
849         /* free unused host pages and set new brk_page */
850         target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
851         brk_page = new_host_brk_page;
852 
853         target_brk = brk_val;
854         return target_brk;
855     }
856 
857     /* We need to allocate more memory after the brk... Note that
858      * we don't use MAP_FIXED because that will map over the top of
859      * any existing mapping (like the one with the host libc or qemu
860      * itself); instead we treat "mapped but at wrong address" as
861      * a failure and unmap again.
862      */
863     new_alloc_size = new_host_brk_page - brk_page;
864     if (new_alloc_size) {
865         mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
866                                         PROT_READ|PROT_WRITE,
867                                         MAP_ANON|MAP_PRIVATE, 0, 0));
868     } else {
869         mapped_addr = brk_page;
870     }
871 
872     if (mapped_addr == brk_page) {
873         /* Heap contents are initialized to zero, as for anonymous
874          * mapped pages.  Technically the new pages are already
875          * initialized to zero since they *are* anonymous mapped
876          * pages, however we have to take care with the contents that
877          * come from the remaining part of the previous page: it may
878          * contain garbage data due to a previous heap usage (grown
879          * then shrunken).  */
880         memset(g2h_untagged(brk_page), 0, HOST_PAGE_ALIGN(brk_page) - brk_page);
881 
882         target_brk = brk_val;
883         brk_page = new_host_brk_page;
884         return target_brk;
885     } else if (mapped_addr != -1) {
886         /* Mapped but at wrong address, meaning there wasn't actually
887          * enough space for this brk.
888          */
889         target_munmap(mapped_addr, new_alloc_size);
890         mapped_addr = -1;
891     }
892 
893 #if defined(TARGET_ALPHA)
894     /* We (partially) emulate OSF/1 on Alpha, which requires we
895        return a proper errno, not an unchanged brk value.  */
896     return -TARGET_ENOMEM;
897 #endif
898     /* For everything else, return the previous break. */
899     return target_brk;
900 }
901 
902 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
903     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
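/*
 * fd_set conversion helpers: the guest's fd_set is a packed array of
 * abi_ulong words in guest byte order, with file descriptor k represented by
 * bit (k % TARGET_ABI_BITS) of word (k / TARGET_ABI_BITS).  The helpers below
 * translate between that layout and the host fd_set one bit at a time.
 */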
904 static inline abi_long copy_from_user_fdset(fd_set *fds,
905                                             abi_ulong target_fds_addr,
906                                             int n)
907 {
908     int i, nw, j, k;
909     abi_ulong b, *target_fds;
910 
911     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
912     if (!(target_fds = lock_user(VERIFY_READ,
913                                  target_fds_addr,
914                                  sizeof(abi_ulong) * nw,
915                                  1)))
916         return -TARGET_EFAULT;
917 
918     FD_ZERO(fds);
919     k = 0;
920     for (i = 0; i < nw; i++) {
921         /* grab the abi_ulong */
922         __get_user(b, &target_fds[i]);
923         for (j = 0; j < TARGET_ABI_BITS; j++) {
924             /* check the bit inside the abi_ulong */
925             if ((b >> j) & 1)
926                 FD_SET(k, fds);
927             k++;
928         }
929     }
930 
931     unlock_user(target_fds, target_fds_addr, 0);
932 
933     return 0;
934 }
935 
936 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
937                                                  abi_ulong target_fds_addr,
938                                                  int n)
939 {
940     if (target_fds_addr) {
941         if (copy_from_user_fdset(fds, target_fds_addr, n))
942             return -TARGET_EFAULT;
943         *fds_ptr = fds;
944     } else {
945         *fds_ptr = NULL;
946     }
947     return 0;
948 }
949 
950 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
951                                           const fd_set *fds,
952                                           int n)
953 {
954     int i, nw, j, k;
955     abi_long v;
956     abi_ulong *target_fds;
957 
958     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
959     if (!(target_fds = lock_user(VERIFY_WRITE,
960                                  target_fds_addr,
961                                  sizeof(abi_ulong) * nw,
962                                  0)))
963         return -TARGET_EFAULT;
964 
965     k = 0;
966     for (i = 0; i < nw; i++) {
967         v = 0;
968         for (j = 0; j < TARGET_ABI_BITS; j++) {
969             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
970             k++;
971         }
972         __put_user(v, &target_fds[i]);
973     }
974 
975     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
976 
977     return 0;
978 }
979 #endif
980 
981 #if defined(__alpha__)
982 #define HOST_HZ 1024
983 #else
984 #define HOST_HZ 100
985 #endif
986 
987 static inline abi_long host_to_target_clock_t(long ticks)
988 {
989 #if HOST_HZ == TARGET_HZ
990     return ticks;
991 #else
992     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
993 #endif
994 }
995 
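/*
 * Worked example of the scaling: a guest whose clock_t runs at 1024 ticks
 * per second (as Alpha does, mirroring the HOST_HZ choice above), emulated
 * on a typical 100 Hz host, must see 250 * 1024 / 100 = 2560 target ticks
 * for 250 host ticks, so both sides agree the reported interval is
 * 2.5 seconds.  The intermediate int64_t cast keeps the multiplication from
 * overflowing a 32-bit long.
 */
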
996 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
997                                              const struct rusage *rusage)
998 {
999     struct target_rusage *target_rusage;
1000 
1001     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1002         return -TARGET_EFAULT;
1003     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1004     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1005     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1006     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1007     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1008     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1009     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1010     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1011     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1012     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1013     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1014     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1015     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1016     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1017     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1018     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1019     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1020     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1021     unlock_user_struct(target_rusage, target_addr, 1);
1022 
1023     return 0;
1024 }
1025 
1026 #ifdef TARGET_NR_setrlimit
1027 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1028 {
1029     abi_ulong target_rlim_swap;
1030     rlim_t result;
1031 
1032     target_rlim_swap = tswapal(target_rlim);
1033     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1034         return RLIM_INFINITY;
1035 
1036     result = target_rlim_swap;
1037     if (target_rlim_swap != (rlim_t)result)
1038         return RLIM_INFINITY;
1039 
1040     return result;
1041 }
1042 #endif
1043 
1044 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1045 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1046 {
1047     abi_ulong target_rlim_swap;
1048     abi_ulong result;
1049 
1050     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1051         target_rlim_swap = TARGET_RLIM_INFINITY;
1052     else
1053         target_rlim_swap = rlim;
1054     result = tswapal(target_rlim_swap);
1055 
1056     return result;
1057 }
1058 #endif
1059 
1060 static inline int target_to_host_resource(int code)
1061 {
1062     switch (code) {
1063     case TARGET_RLIMIT_AS:
1064         return RLIMIT_AS;
1065     case TARGET_RLIMIT_CORE:
1066         return RLIMIT_CORE;
1067     case TARGET_RLIMIT_CPU:
1068         return RLIMIT_CPU;
1069     case TARGET_RLIMIT_DATA:
1070         return RLIMIT_DATA;
1071     case TARGET_RLIMIT_FSIZE:
1072         return RLIMIT_FSIZE;
1073     case TARGET_RLIMIT_LOCKS:
1074         return RLIMIT_LOCKS;
1075     case TARGET_RLIMIT_MEMLOCK:
1076         return RLIMIT_MEMLOCK;
1077     case TARGET_RLIMIT_MSGQUEUE:
1078         return RLIMIT_MSGQUEUE;
1079     case TARGET_RLIMIT_NICE:
1080         return RLIMIT_NICE;
1081     case TARGET_RLIMIT_NOFILE:
1082         return RLIMIT_NOFILE;
1083     case TARGET_RLIMIT_NPROC:
1084         return RLIMIT_NPROC;
1085     case TARGET_RLIMIT_RSS:
1086         return RLIMIT_RSS;
1087     case TARGET_RLIMIT_RTPRIO:
1088         return RLIMIT_RTPRIO;
1089 #ifdef RLIMIT_RTTIME
1090     case TARGET_RLIMIT_RTTIME:
1091         return RLIMIT_RTTIME;
1092 #endif
1093     case TARGET_RLIMIT_SIGPENDING:
1094         return RLIMIT_SIGPENDING;
1095     case TARGET_RLIMIT_STACK:
1096         return RLIMIT_STACK;
1097     default:
1098         return code;
1099     }
1100 }
1101 
1102 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1103                                               abi_ulong target_tv_addr)
1104 {
1105     struct target_timeval *target_tv;
1106 
1107     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1108         return -TARGET_EFAULT;
1109     }
1110 
1111     __get_user(tv->tv_sec, &target_tv->tv_sec);
1112     __get_user(tv->tv_usec, &target_tv->tv_usec);
1113 
1114     unlock_user_struct(target_tv, target_tv_addr, 0);
1115 
1116     return 0;
1117 }
1118 
1119 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1120                                             const struct timeval *tv)
1121 {
1122     struct target_timeval *target_tv;
1123 
1124     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1125         return -TARGET_EFAULT;
1126     }
1127 
1128     __put_user(tv->tv_sec, &target_tv->tv_sec);
1129     __put_user(tv->tv_usec, &target_tv->tv_usec);
1130 
1131     unlock_user_struct(target_tv, target_tv_addr, 1);
1132 
1133     return 0;
1134 }
1135 
1136 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1137 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1138                                                 abi_ulong target_tv_addr)
1139 {
1140     struct target__kernel_sock_timeval *target_tv;
1141 
1142     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1143         return -TARGET_EFAULT;
1144     }
1145 
1146     __get_user(tv->tv_sec, &target_tv->tv_sec);
1147     __get_user(tv->tv_usec, &target_tv->tv_usec);
1148 
1149     unlock_user_struct(target_tv, target_tv_addr, 0);
1150 
1151     return 0;
1152 }
1153 #endif
1154 
1155 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1156                                               const struct timeval *tv)
1157 {
1158     struct target__kernel_sock_timeval *target_tv;
1159 
1160     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1161         return -TARGET_EFAULT;
1162     }
1163 
1164     __put_user(tv->tv_sec, &target_tv->tv_sec);
1165     __put_user(tv->tv_usec, &target_tv->tv_usec);
1166 
1167     unlock_user_struct(target_tv, target_tv_addr, 1);
1168 
1169     return 0;
1170 }
1171 
1172 #if defined(TARGET_NR_futex) || \
1173     defined(TARGET_NR_rt_sigtimedwait) || \
1174     defined(TARGET_NR_pselect6) || \
1175     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1176     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1177     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1178     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1179     defined(TARGET_NR_timer_settime) || \
1180     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1181 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1182                                                abi_ulong target_addr)
1183 {
1184     struct target_timespec *target_ts;
1185 
1186     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1187         return -TARGET_EFAULT;
1188     }
1189     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1190     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1191     unlock_user_struct(target_ts, target_addr, 0);
1192     return 0;
1193 }
1194 #endif
1195 
1196 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1197     defined(TARGET_NR_timer_settime64) || \
1198     defined(TARGET_NR_mq_timedsend_time64) || \
1199     defined(TARGET_NR_mq_timedreceive_time64) || \
1200     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1201     defined(TARGET_NR_clock_nanosleep_time64) || \
1202     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1203     defined(TARGET_NR_utimensat) || \
1204     defined(TARGET_NR_utimensat_time64) || \
1205     defined(TARGET_NR_semtimedop_time64) || \
1206     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1207 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1208                                                  abi_ulong target_addr)
1209 {
1210     struct target__kernel_timespec *target_ts;
1211 
1212     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1213         return -TARGET_EFAULT;
1214     }
1215     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1216     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1217     /* in 32-bit mode, this drops the padding */
1218     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1219     unlock_user_struct(target_ts, target_addr, 0);
1220     return 0;
1221 }
1222 #endif
1223 
1224 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1225                                                struct timespec *host_ts)
1226 {
1227     struct target_timespec *target_ts;
1228 
1229     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1230         return -TARGET_EFAULT;
1231     }
1232     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1233     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1234     unlock_user_struct(target_ts, target_addr, 1);
1235     return 0;
1236 }
1237 
1238 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1239                                                  struct timespec *host_ts)
1240 {
1241     struct target__kernel_timespec *target_ts;
1242 
1243     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1244         return -TARGET_EFAULT;
1245     }
1246     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1247     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1248     unlock_user_struct(target_ts, target_addr, 1);
1249     return 0;
1250 }
1251 
1252 #if defined(TARGET_NR_gettimeofday)
1253 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1254                                              struct timezone *tz)
1255 {
1256     struct target_timezone *target_tz;
1257 
1258     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1259         return -TARGET_EFAULT;
1260     }
1261 
1262     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1263     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1264 
1265     unlock_user_struct(target_tz, target_tz_addr, 1);
1266 
1267     return 0;
1268 }
1269 #endif
1270 
1271 #if defined(TARGET_NR_settimeofday)
1272 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1273                                                abi_ulong target_tz_addr)
1274 {
1275     struct target_timezone *target_tz;
1276 
1277     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1278         return -TARGET_EFAULT;
1279     }
1280 
1281     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1282     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1283 
1284     unlock_user_struct(target_tz, target_tz_addr, 0);
1285 
1286     return 0;
1287 }
1288 #endif
1289 
1290 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1291 #include <mqueue.h>
1292 
1293 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1294                                               abi_ulong target_mq_attr_addr)
1295 {
1296     struct target_mq_attr *target_mq_attr;
1297 
1298     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1299                           target_mq_attr_addr, 1))
1300         return -TARGET_EFAULT;
1301 
1302     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1303     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1304     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1305     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1306 
1307     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1308 
1309     return 0;
1310 }
1311 
1312 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1313                                             const struct mq_attr *attr)
1314 {
1315     struct target_mq_attr *target_mq_attr;
1316 
1317     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1318                           target_mq_attr_addr, 0))
1319         return -TARGET_EFAULT;
1320 
1321     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1322     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1323     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1324     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1325 
1326     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1327 
1328     return 0;
1329 }
1330 #endif
1331 
1332 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1333 /* do_select() must return target values and target errnos. */
1334 static abi_long do_select(int n,
1335                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1336                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1337 {
1338     fd_set rfds, wfds, efds;
1339     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1340     struct timeval tv;
1341     struct timespec ts, *ts_ptr;
1342     abi_long ret;
1343 
1344     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1345     if (ret) {
1346         return ret;
1347     }
1348     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1349     if (ret) {
1350         return ret;
1351     }
1352     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1353     if (ret) {
1354         return ret;
1355     }
1356 
1357     if (target_tv_addr) {
1358         if (copy_from_user_timeval(&tv, target_tv_addr))
1359             return -TARGET_EFAULT;
1360         ts.tv_sec = tv.tv_sec;
1361         ts.tv_nsec = tv.tv_usec * 1000;
1362         ts_ptr = &ts;
1363     } else {
1364         ts_ptr = NULL;
1365     }
1366 
1367     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1368                                   ts_ptr, NULL));
1369 
1370     if (!is_error(ret)) {
1371         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1372             return -TARGET_EFAULT;
1373         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1374             return -TARGET_EFAULT;
1375         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1376             return -TARGET_EFAULT;
1377 
1378         if (target_tv_addr) {
1379             tv.tv_sec = ts.tv_sec;
1380             tv.tv_usec = ts.tv_nsec / 1000;
1381             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1382                 return -TARGET_EFAULT;
1383             }
1384         }
1385     }
1386 
1387     return ret;
1388 }
1389 
1390 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1391 static abi_long do_old_select(abi_ulong arg1)
1392 {
1393     struct target_sel_arg_struct *sel;
1394     abi_ulong inp, outp, exp, tvp;
1395     long nsel;
1396 
1397     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1398         return -TARGET_EFAULT;
1399     }
1400 
1401     nsel = tswapal(sel->n);
1402     inp = tswapal(sel->inp);
1403     outp = tswapal(sel->outp);
1404     exp = tswapal(sel->exp);
1405     tvp = tswapal(sel->tvp);
1406 
1407     unlock_user_struct(sel, arg1, 0);
1408 
1409     return do_select(nsel, inp, outp, exp, tvp);
1410 }
1411 #endif
1412 #endif
1413 
1414 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1415 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1416                             abi_long arg4, abi_long arg5, abi_long arg6,
1417                             bool time64)
1418 {
1419     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1420     fd_set rfds, wfds, efds;
1421     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1422     struct timespec ts, *ts_ptr;
1423     abi_long ret;
1424 
1425     /*
1426      * The 6th arg is actually two args smashed together,
1427      * so we cannot use the C library.
1428      */
1429     struct {
1430         sigset_t *set;
1431         size_t size;
1432     } sig, *sig_ptr;
1433 
1434     abi_ulong arg_sigset, arg_sigsize, *arg7;
1435 
1436     n = arg1;
1437     rfd_addr = arg2;
1438     wfd_addr = arg3;
1439     efd_addr = arg4;
1440     ts_addr = arg5;
1441 
1442     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1443     if (ret) {
1444         return ret;
1445     }
1446     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1447     if (ret) {
1448         return ret;
1449     }
1450     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1451     if (ret) {
1452         return ret;
1453     }
1454 
1455     /*
1456      * This takes a timespec, and not a timeval, so we cannot
1457      * use the do_select() helper ...
1458      */
1459     if (ts_addr) {
1460         if (time64) {
1461             if (target_to_host_timespec64(&ts, ts_addr)) {
1462                 return -TARGET_EFAULT;
1463             }
1464         } else {
1465             if (target_to_host_timespec(&ts, ts_addr)) {
1466                 return -TARGET_EFAULT;
1467             }
1468         }
1469         ts_ptr = &ts;
1470     } else {
1471         ts_ptr = NULL;
1472     }
1473 
1474     /* Extract the two packed args for the sigset */
1475     sig_ptr = NULL;
1476     if (arg6) {
1477         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1478         if (!arg7) {
1479             return -TARGET_EFAULT;
1480         }
1481         arg_sigset = tswapal(arg7[0]);
1482         arg_sigsize = tswapal(arg7[1]);
1483         unlock_user(arg7, arg6, 0);
1484 
1485         if (arg_sigset) {
1486             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1487             if (ret != 0) {
1488                 return ret;
1489             }
1490             sig_ptr = &sig;
1491             sig.size = SIGSET_T_SIZE;
1492         }
1493     }
1494 
1495     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1496                                   ts_ptr, sig_ptr));
1497 
1498     if (sig_ptr) {
1499         finish_sigsuspend_mask(ret);
1500     }
1501 
1502     if (!is_error(ret)) {
1503         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1504             return -TARGET_EFAULT;
1505         }
1506         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1507             return -TARGET_EFAULT;
1508         }
1509         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1510             return -TARGET_EFAULT;
1511         }
1512         if (time64) {
1513             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1514                 return -TARGET_EFAULT;
1515             }
1516         } else {
1517             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1518                 return -TARGET_EFAULT;
1519             }
1520         }
1521     }
1522     return ret;
1523 }
1524 #endif
1525 
1526 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1527     defined(TARGET_NR_ppoll_time64)
1528 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1529                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1530 {
1531     struct target_pollfd *target_pfd;
1532     unsigned int nfds = arg2;
1533     struct pollfd *pfd;
1534     unsigned int i;
1535     abi_long ret;
1536 
1537     pfd = NULL;
1538     target_pfd = NULL;
1539     if (nfds) {
1540         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1541             return -TARGET_EINVAL;
1542         }
1543         target_pfd = lock_user(VERIFY_WRITE, arg1,
1544                                sizeof(struct target_pollfd) * nfds, 1);
1545         if (!target_pfd) {
1546             return -TARGET_EFAULT;
1547         }
1548 
1549         pfd = alloca(sizeof(struct pollfd) * nfds);
1550         for (i = 0; i < nfds; i++) {
1551             pfd[i].fd = tswap32(target_pfd[i].fd);
1552             pfd[i].events = tswap16(target_pfd[i].events);
1553         }
1554     }
1555     if (ppoll) {
1556         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1557         sigset_t *set = NULL;
1558 
1559         if (arg3) {
1560             if (time64) {
1561                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1562                     unlock_user(target_pfd, arg1, 0);
1563                     return -TARGET_EFAULT;
1564                 }
1565             } else {
1566                 if (target_to_host_timespec(timeout_ts, arg3)) {
1567                     unlock_user(target_pfd, arg1, 0);
1568                     return -TARGET_EFAULT;
1569                 }
1570             }
1571         } else {
1572             timeout_ts = NULL;
1573         }
1574 
1575         if (arg4) {
1576             ret = process_sigsuspend_mask(&set, arg4, arg5);
1577             if (ret != 0) {
1578                 unlock_user(target_pfd, arg1, 0);
1579                 return ret;
1580             }
1581         }
1582 
1583         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1584                                    set, SIGSET_T_SIZE));
1585 
1586         if (set) {
1587             finish_sigsuspend_mask(ret);
1588         }
1589         if (!is_error(ret) && arg3) {
1590             if (time64) {
1591                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1592                     return -TARGET_EFAULT;
1593                 }
1594             } else {
1595                 if (host_to_target_timespec(arg3, timeout_ts)) {
1596                     return -TARGET_EFAULT;
1597                 }
1598             }
1599         }
1600     } else {
1601         struct timespec ts, *pts;
1602 
1603         if (arg3 >= 0) {
1604             /* Convert ms to secs, ns */
1605             ts.tv_sec = arg3 / 1000;
1606             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1607             pts = &ts;
1608         } else {
1609             /* A negative poll() timeout means "infinite" */
1610             pts = NULL;
1611         }
1612         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1613     }
1614 
1615     if (!is_error(ret)) {
1616         for (i = 0; i < nfds; i++) {
1617             target_pfd[i].revents = tswap16(pfd[i].revents);
1618         }
1619     }
1620     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1621     return ret;
1622 }
1623 #endif
1624 
1625 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1626                         int flags, int is_pipe2)
1627 {
1628     int host_pipe[2];
1629     abi_long ret;
1630     ret = pipe2(host_pipe, flags);
1631 
1632     if (is_error(ret))
1633         return get_errno(ret);
1634 
1635     /* Several targets have special calling conventions for the original
1636        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1637     if (!is_pipe2) {
1638 #if defined(TARGET_ALPHA)
1639         cpu_env->ir[IR_A4] = host_pipe[1];
1640         return host_pipe[0];
1641 #elif defined(TARGET_MIPS)
1642         cpu_env->active_tc.gpr[3] = host_pipe[1];
1643         return host_pipe[0];
1644 #elif defined(TARGET_SH4)
1645         cpu_env->gregs[1] = host_pipe[1];
1646         return host_pipe[0];
1647 #elif defined(TARGET_SPARC)
1648         cpu_env->regwptr[1] = host_pipe[1];
1649         return host_pipe[0];
1650 #endif
1651     }
1652 
1653     if (put_user_s32(host_pipe[0], pipedes)
1654         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1655         return -TARGET_EFAULT;
1656     return get_errno(ret);
1657 }
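
/*
 * For example (describing typical guest expectations, not anything enforced
 * here): on MIPS the guest's pipe() gets the read end back as the ordinary
 * return value and the write end in $v1, which is gpr[3] as set above, so
 * for the targets special-cased here the put_user_s32() stores at the end
 * of the function are only reached via pipe2().
 */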
1658 
1659 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1660                                               abi_ulong target_addr,
1661                                               socklen_t len)
1662 {
1663     struct target_ip_mreqn *target_smreqn;
1664 
1665     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1666     if (!target_smreqn)
1667         return -TARGET_EFAULT;
1668     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1669     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1670     if (len == sizeof(struct target_ip_mreqn))
1671         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1672     unlock_user(target_smreqn, target_addr, 0);
1673 
1674     return 0;
1675 }
1676 
1677 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1678                                                abi_ulong target_addr,
1679                                                socklen_t len)
1680 {
1681     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1682     sa_family_t sa_family;
1683     struct target_sockaddr *target_saddr;
1684 
1685     if (fd_trans_target_to_host_addr(fd)) {
1686         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1687     }
1688 
1689     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1690     if (!target_saddr)
1691         return -TARGET_EFAULT;
1692 
1693     sa_family = tswap16(target_saddr->sa_family);
1694 
1695     /* Oops. The caller might send an incomplete sun_path; sun_path
1696      * must be terminated by \0 (see the manual page), but
1697      * unfortunately it is quite common to specify sockaddr_un
1698      * length as "strlen(x->sun_path)" when it should be
1699      * "strlen(...) + 1". We fix that here if needed.
1700      * The Linux kernel has a similar leniency.
1701      */
1702 
1703     if (sa_family == AF_UNIX) {
1704         if (len < unix_maxlen && len > 0) {
1705             char *cp = (char*)target_saddr;
1706 
1707             if ( cp[len-1] && !cp[len] )
1708                 len++;
1709         }
1710         if (len > unix_maxlen)
1711             len = unix_maxlen;
1712     }
1713 
1714     memcpy(addr, target_saddr, len);
1715     addr->sa_family = sa_family;
1716     if (sa_family == AF_NETLINK) {
1717         struct sockaddr_nl *nladdr;
1718 
1719         nladdr = (struct sockaddr_nl *)addr;
1720         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1721         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1722     } else if (sa_family == AF_PACKET) {
1723         struct target_sockaddr_ll *lladdr;
1724 
1725         lladdr = (struct target_sockaddr_ll *)addr;
1726         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1727         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1728     } else if (sa_family == AF_INET6) {
1729         struct sockaddr_in6 *in6addr;
1730 
1731         in6addr = (struct sockaddr_in6 *)addr;
1732         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1733     }
1734     unlock_user(target_saddr, target_addr, 0);
1735 
1736     return 0;
1737 }
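
/*
 * Worked example of the AF_UNIX fix-up above, with hypothetical values:
 * a guest that passes addrlen = offsetof(struct sockaddr_un, sun_path) +
 * strlen("/tmp/sock"), i.e. without counting the terminating NUL, still has
 * that NUL in the buffer just past the reported length.  The
 * "cp[len-1] && !cp[len]" test spots this and widens len by one byte so the
 * host kernel sees a properly terminated sun_path.
 */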
1738 
1739 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1740                                                struct sockaddr *addr,
1741                                                socklen_t len)
1742 {
1743     struct target_sockaddr *target_saddr;
1744 
1745     if (len == 0) {
1746         return 0;
1747     }
1748     assert(addr);
1749 
1750     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1751     if (!target_saddr)
1752         return -TARGET_EFAULT;
1753     memcpy(target_saddr, addr, len);
1754     if (len >= offsetof(struct target_sockaddr, sa_family) +
1755         sizeof(target_saddr->sa_family)) {
1756         target_saddr->sa_family = tswap16(addr->sa_family);
1757     }
1758     if (addr->sa_family == AF_NETLINK &&
1759         len >= sizeof(struct target_sockaddr_nl)) {
1760         struct target_sockaddr_nl *target_nl =
1761                (struct target_sockaddr_nl *)target_saddr;
1762         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1763         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1764     } else if (addr->sa_family == AF_PACKET) {
1765         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1766         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1767         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1768     } else if (addr->sa_family == AF_INET6 &&
1769                len >= sizeof(struct target_sockaddr_in6)) {
1770         struct target_sockaddr_in6 *target_in6 =
1771                (struct target_sockaddr_in6 *)target_saddr;
1772         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1773     }
1774     unlock_user(target_saddr, target_addr, len);
1775 
1776     return 0;
1777 }
1778 
1779 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1780                                            struct target_msghdr *target_msgh)
1781 {
1782     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1783     abi_long msg_controllen;
1784     abi_ulong target_cmsg_addr;
1785     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1786     socklen_t space = 0;
1787 
1788     msg_controllen = tswapal(target_msgh->msg_controllen);
1789     if (msg_controllen < sizeof (struct target_cmsghdr))
1790         goto the_end;
1791     target_cmsg_addr = tswapal(target_msgh->msg_control);
1792     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1793     target_cmsg_start = target_cmsg;
1794     if (!target_cmsg)
1795         return -TARGET_EFAULT;
1796 
1797     while (cmsg && target_cmsg) {
1798         void *data = CMSG_DATA(cmsg);
1799         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1800 
1801         int len = tswapal(target_cmsg->cmsg_len)
1802             - sizeof(struct target_cmsghdr);
1803 
1804         space += CMSG_SPACE(len);
1805         if (space > msgh->msg_controllen) {
1806             space -= CMSG_SPACE(len);
1807             /* This is a QEMU bug, since we allocated the payload
1808              * area ourselves (unlike overflow in host-to-target
1809              * conversion, which is just the guest giving us a buffer
1810              * that's too small). It can't happen for the payload types
1811              * we currently support; if it becomes an issue in future
1812              * we would need to improve our allocation strategy to
1813              * something more intelligent than "twice the size of the
1814              * target buffer we're reading from".
1815              */
1816             qemu_log_mask(LOG_UNIMP,
1817                           ("Unsupported ancillary data %d/%d: "
1818                            "unhandled msg size\n"),
1819                           tswap32(target_cmsg->cmsg_level),
1820                           tswap32(target_cmsg->cmsg_type));
1821             break;
1822         }
1823 
1824         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1825             cmsg->cmsg_level = SOL_SOCKET;
1826         } else {
1827             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1828         }
1829         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1830         cmsg->cmsg_len = CMSG_LEN(len);
1831 
1832         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1833             int *fd = (int *)data;
1834             int *target_fd = (int *)target_data;
1835             int i, numfds = len / sizeof(int);
1836 
1837             for (i = 0; i < numfds; i++) {
1838                 __get_user(fd[i], target_fd + i);
1839             }
1840         } else if (cmsg->cmsg_level == SOL_SOCKET
1841                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1842             struct ucred *cred = (struct ucred *)data;
1843             struct target_ucred *target_cred =
1844                 (struct target_ucred *)target_data;
1845 
1846             __get_user(cred->pid, &target_cred->pid);
1847             __get_user(cred->uid, &target_cred->uid);
1848             __get_user(cred->gid, &target_cred->gid);
1849         } else if (cmsg->cmsg_level == SOL_ALG) {
1850             uint32_t *dst = (uint32_t *)data;
1851 
1852             memcpy(dst, target_data, len);
1853             /* fix endianness of the first 32-bit word */
1854             if (len >= sizeof(uint32_t)) {
1855                 *dst = tswap32(*dst);
1856             }
1857         } else {
1858             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1859                           cmsg->cmsg_level, cmsg->cmsg_type);
1860             memcpy(data, target_data, len);
1861         }
1862 
1863         cmsg = CMSG_NXTHDR(msgh, cmsg);
1864         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1865                                          target_cmsg_start);
1866     }
1867     unlock_user(target_cmsg, target_cmsg_addr, 0);
1868  the_end:
1869     msgh->msg_controllen = space;
1870     return 0;
1871 }
1872 
1873 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1874                                            struct msghdr *msgh)
1875 {
1876     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1877     abi_long msg_controllen;
1878     abi_ulong target_cmsg_addr;
1879     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1880     socklen_t space = 0;
1881 
1882     msg_controllen = tswapal(target_msgh->msg_controllen);
1883     if (msg_controllen < sizeof (struct target_cmsghdr))
1884         goto the_end;
1885     target_cmsg_addr = tswapal(target_msgh->msg_control);
1886     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1887     target_cmsg_start = target_cmsg;
1888     if (!target_cmsg)
1889         return -TARGET_EFAULT;
1890 
1891     while (cmsg && target_cmsg) {
1892         void *data = CMSG_DATA(cmsg);
1893         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1894 
1895         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1896         int tgt_len, tgt_space;
1897 
1898         /* We never copy a half-header but may copy half-data;
1899          * this is Linux's behaviour in put_cmsg(). Note that
1900          * truncation here is a guest problem (which we report
1901          * to the guest via the CTRUNC bit), unlike truncation
1902          * in target_to_host_cmsg, which is a QEMU bug.
1903          */
1904         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1905             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1906             break;
1907         }
1908 
1909         if (cmsg->cmsg_level == SOL_SOCKET) {
1910             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1911         } else {
1912             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1913         }
1914         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1915 
1916         /* Payload types which need a different size of payload on
1917          * the target must adjust tgt_len here.
1918          */
1919         tgt_len = len;
1920         switch (cmsg->cmsg_level) {
1921         case SOL_SOCKET:
1922             switch (cmsg->cmsg_type) {
1923             case SO_TIMESTAMP:
1924                 tgt_len = sizeof(struct target_timeval);
1925                 break;
1926             default:
1927                 break;
1928             }
1929             break;
1930         default:
1931             break;
1932         }
1933 
1934         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1935             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1936             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1937         }
1938 
1939         /* We must now copy-and-convert len bytes of payload
1940          * into tgt_len bytes of destination space. Bear in mind
1941          * that in both source and destination we may be dealing
1942          * with a truncated value!
1943          */
1944         switch (cmsg->cmsg_level) {
1945         case SOL_SOCKET:
1946             switch (cmsg->cmsg_type) {
1947             case SCM_RIGHTS:
1948             {
1949                 int *fd = (int *)data;
1950                 int *target_fd = (int *)target_data;
1951                 int i, numfds = tgt_len / sizeof(int);
1952 
1953                 for (i = 0; i < numfds; i++) {
1954                     __put_user(fd[i], target_fd + i);
1955                 }
1956                 break;
1957             }
1958             case SO_TIMESTAMP:
1959             {
1960                 struct timeval *tv = (struct timeval *)data;
1961                 struct target_timeval *target_tv =
1962                     (struct target_timeval *)target_data;
1963 
1964                 if (len != sizeof(struct timeval) ||
1965                     tgt_len != sizeof(struct target_timeval)) {
1966                     goto unimplemented;
1967                 }
1968 
1969                 /* copy struct timeval to target */
1970                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1971                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1972                 break;
1973             }
1974             case SCM_CREDENTIALS:
1975             {
1976                 struct ucred *cred = (struct ucred *)data;
1977                 struct target_ucred *target_cred =
1978                     (struct target_ucred *)target_data;
1979 
1980                 __put_user(cred->pid, &target_cred->pid);
1981                 __put_user(cred->uid, &target_cred->uid);
1982                 __put_user(cred->gid, &target_cred->gid);
1983                 break;
1984             }
1985             default:
1986                 goto unimplemented;
1987             }
1988             break;
1989 
1990         case SOL_IP:
1991             switch (cmsg->cmsg_type) {
1992             case IP_TTL:
1993             {
1994                 uint32_t *v = (uint32_t *)data;
1995                 uint32_t *t_int = (uint32_t *)target_data;
1996 
1997                 if (len != sizeof(uint32_t) ||
1998                     tgt_len != sizeof(uint32_t)) {
1999                     goto unimplemented;
2000                 }
2001                 __put_user(*v, t_int);
2002                 break;
2003             }
2004             case IP_RECVERR:
2005             {
2006                 struct errhdr_t {
2007                    struct sock_extended_err ee;
2008                    struct sockaddr_in offender;
2009                 };
2010                 struct errhdr_t *errh = (struct errhdr_t *)data;
2011                 struct errhdr_t *target_errh =
2012                     (struct errhdr_t *)target_data;
2013 
2014                 if (len != sizeof(struct errhdr_t) ||
2015                     tgt_len != sizeof(struct errhdr_t)) {
2016                     goto unimplemented;
2017                 }
2018                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2019                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2020                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2021                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2022                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2023                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2024                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2025                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2026                     (void *) &errh->offender, sizeof(errh->offender));
2027                 break;
2028             }
2029             default:
2030                 goto unimplemented;
2031             }
2032             break;
2033 
2034         case SOL_IPV6:
2035             switch (cmsg->cmsg_type) {
2036             case IPV6_HOPLIMIT:
2037             {
2038                 uint32_t *v = (uint32_t *)data;
2039                 uint32_t *t_int = (uint32_t *)target_data;
2040 
2041                 if (len != sizeof(uint32_t) ||
2042                     tgt_len != sizeof(uint32_t)) {
2043                     goto unimplemented;
2044                 }
2045                 __put_user(*v, t_int);
2046                 break;
2047             }
2048             case IPV6_RECVERR:
2049             {
2050                 struct errhdr6_t {
2051                    struct sock_extended_err ee;
2052                    struct sockaddr_in6 offender;
2053                 };
2054                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2055                 struct errhdr6_t *target_errh =
2056                     (struct errhdr6_t *)target_data;
2057 
2058                 if (len != sizeof(struct errhdr6_t) ||
2059                     tgt_len != sizeof(struct errhdr6_t)) {
2060                     goto unimplemented;
2061                 }
2062                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2063                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2064                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2065                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2066                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2067                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2068                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2069                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2070                     (void *) &errh->offender, sizeof(errh->offender));
2071                 break;
2072             }
2073             default:
2074                 goto unimplemented;
2075             }
2076             break;
2077 
2078         default:
2079         unimplemented:
2080             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2081                           cmsg->cmsg_level, cmsg->cmsg_type);
2082             memcpy(target_data, data, MIN(len, tgt_len));
2083             if (tgt_len > len) {
2084                 memset(target_data + len, 0, tgt_len - len);
2085             }
2086         }
2087 
2088         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2089         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2090         if (msg_controllen < tgt_space) {
2091             tgt_space = msg_controllen;
2092         }
2093         msg_controllen -= tgt_space;
2094         space += tgt_space;
2095         cmsg = CMSG_NXTHDR(msgh, cmsg);
2096         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2097                                          target_cmsg_start);
2098     }
2099     unlock_user(target_cmsg, target_cmsg_addr, space);
2100  the_end:
2101     target_msgh->msg_controllen = tswapal(space);
2102     return 0;
2103 }
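
/*
 * Example of why tgt_len above can differ from len (assuming a 64-bit host
 * and a 32-bit guest): a SOL_SOCKET/SO_TIMESTAMP payload is a 16-byte
 * struct timeval on the host but only an 8-byte struct target_timeval for
 * the guest, so cmsg_len, the TARGET_CMSG_SPACE accounting and the copy
 * itself all have to use the target-sized length rather than the host one.
 */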
2104 
2105 /* do_setsockopt() must return target values and target errnos. */
2106 static abi_long do_setsockopt(int sockfd, int level, int optname,
2107                               abi_ulong optval_addr, socklen_t optlen)
2108 {
2109     abi_long ret;
2110     int val;
2111     struct ip_mreqn *ip_mreq;
2112     struct ip_mreq_source *ip_mreq_source;
2113 
2114     switch(level) {
2115     case SOL_TCP:
2116     case SOL_UDP:
2117         /* TCP and UDP options all take an 'int' value.  */
2118         if (optlen < sizeof(uint32_t))
2119             return -TARGET_EINVAL;
2120 
2121         if (get_user_u32(val, optval_addr))
2122             return -TARGET_EFAULT;
2123         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2124         break;
2125     case SOL_IP:
2126         switch(optname) {
2127         case IP_TOS:
2128         case IP_TTL:
2129         case IP_HDRINCL:
2130         case IP_ROUTER_ALERT:
2131         case IP_RECVOPTS:
2132         case IP_RETOPTS:
2133         case IP_PKTINFO:
2134         case IP_MTU_DISCOVER:
2135         case IP_RECVERR:
2136         case IP_RECVTTL:
2137         case IP_RECVTOS:
2138 #ifdef IP_FREEBIND
2139         case IP_FREEBIND:
2140 #endif
2141         case IP_MULTICAST_TTL:
2142         case IP_MULTICAST_LOOP:
2143             val = 0;
2144             if (optlen >= sizeof(uint32_t)) {
2145                 if (get_user_u32(val, optval_addr))
2146                     return -TARGET_EFAULT;
2147             } else if (optlen >= 1) {
2148                 if (get_user_u8(val, optval_addr))
2149                     return -TARGET_EFAULT;
2150             }
2151             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2152             break;
2153         case IP_ADD_MEMBERSHIP:
2154         case IP_DROP_MEMBERSHIP:
2155             if (optlen < sizeof (struct target_ip_mreq) ||
2156                 optlen > sizeof (struct target_ip_mreqn))
2157                 return -TARGET_EINVAL;
2158 
2159             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2160             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2161             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2162             break;
2163 
2164         case IP_BLOCK_SOURCE:
2165         case IP_UNBLOCK_SOURCE:
2166         case IP_ADD_SOURCE_MEMBERSHIP:
2167         case IP_DROP_SOURCE_MEMBERSHIP:
2168             if (optlen != sizeof (struct target_ip_mreq_source))
2169                 return -TARGET_EINVAL;
2170 
2171             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2172             if (!ip_mreq_source) {
2173                 return -TARGET_EFAULT;
2174             }
2175             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2176             unlock_user (ip_mreq_source, optval_addr, 0);
2177             break;
2178 
2179         default:
2180             goto unimplemented;
2181         }
2182         break;
2183     case SOL_IPV6:
2184         switch (optname) {
2185         case IPV6_MTU_DISCOVER:
2186         case IPV6_MTU:
2187         case IPV6_V6ONLY:
2188         case IPV6_RECVPKTINFO:
2189         case IPV6_UNICAST_HOPS:
2190         case IPV6_MULTICAST_HOPS:
2191         case IPV6_MULTICAST_LOOP:
2192         case IPV6_RECVERR:
2193         case IPV6_RECVHOPLIMIT:
2194         case IPV6_2292HOPLIMIT:
2195         case IPV6_CHECKSUM:
2196         case IPV6_ADDRFORM:
2197         case IPV6_2292PKTINFO:
2198         case IPV6_RECVTCLASS:
2199         case IPV6_RECVRTHDR:
2200         case IPV6_2292RTHDR:
2201         case IPV6_RECVHOPOPTS:
2202         case IPV6_2292HOPOPTS:
2203         case IPV6_RECVDSTOPTS:
2204         case IPV6_2292DSTOPTS:
2205         case IPV6_TCLASS:
2206         case IPV6_ADDR_PREFERENCES:
2207 #ifdef IPV6_RECVPATHMTU
2208         case IPV6_RECVPATHMTU:
2209 #endif
2210 #ifdef IPV6_TRANSPARENT
2211         case IPV6_TRANSPARENT:
2212 #endif
2213 #ifdef IPV6_FREEBIND
2214         case IPV6_FREEBIND:
2215 #endif
2216 #ifdef IPV6_RECVORIGDSTADDR
2217         case IPV6_RECVORIGDSTADDR:
2218 #endif
2219             val = 0;
2220             if (optlen < sizeof(uint32_t)) {
2221                 return -TARGET_EINVAL;
2222             }
2223             if (get_user_u32(val, optval_addr)) {
2224                 return -TARGET_EFAULT;
2225             }
2226             ret = get_errno(setsockopt(sockfd, level, optname,
2227                                        &val, sizeof(val)));
2228             break;
2229         case IPV6_PKTINFO:
2230         {
2231             struct in6_pktinfo pki;
2232 
2233             if (optlen < sizeof(pki)) {
2234                 return -TARGET_EINVAL;
2235             }
2236 
2237             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2238                 return -TARGET_EFAULT;
2239             }
2240 
2241             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2242 
2243             ret = get_errno(setsockopt(sockfd, level, optname,
2244                                        &pki, sizeof(pki)));
2245             break;
2246         }
2247         case IPV6_ADD_MEMBERSHIP:
2248         case IPV6_DROP_MEMBERSHIP:
2249         {
2250             struct ipv6_mreq ipv6mreq;
2251 
2252             if (optlen < sizeof(ipv6mreq)) {
2253                 return -TARGET_EINVAL;
2254             }
2255 
2256             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2257                 return -TARGET_EFAULT;
2258             }
2259 
2260             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2261 
2262             ret = get_errno(setsockopt(sockfd, level, optname,
2263                                        &ipv6mreq, sizeof(ipv6mreq)));
2264             break;
2265         }
2266         default:
2267             goto unimplemented;
2268         }
2269         break;
2270     case SOL_ICMPV6:
2271         switch (optname) {
2272         case ICMPV6_FILTER:
2273         {
2274             struct icmp6_filter icmp6f;
2275 
2276             if (optlen > sizeof(icmp6f)) {
2277                 optlen = sizeof(icmp6f);
2278             }
2279 
2280             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2281                 return -TARGET_EFAULT;
2282             }
2283 
2284             for (val = 0; val < 8; val++) {
2285                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2286             }
2287 
2288             ret = get_errno(setsockopt(sockfd, level, optname,
2289                                        &icmp6f, optlen));
2290             break;
2291         }
2292         default:
2293             goto unimplemented;
2294         }
2295         break;
2296     case SOL_RAW:
2297         switch (optname) {
2298         case ICMP_FILTER:
2299         case IPV6_CHECKSUM:
2300             /* these take a u32 value */
2301             if (optlen < sizeof(uint32_t)) {
2302                 return -TARGET_EINVAL;
2303             }
2304 
2305             if (get_user_u32(val, optval_addr)) {
2306                 return -TARGET_EFAULT;
2307             }
2308             ret = get_errno(setsockopt(sockfd, level, optname,
2309                                        &val, sizeof(val)));
2310             break;
2311 
2312         default:
2313             goto unimplemented;
2314         }
2315         break;
2316 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2317     case SOL_ALG:
2318         switch (optname) {
2319         case ALG_SET_KEY:
2320         {
2321             char *alg_key = g_malloc(optlen);
2322 
2323             if (!alg_key) {
2324                 return -TARGET_ENOMEM;
2325             }
2326             if (copy_from_user(alg_key, optval_addr, optlen)) {
2327                 g_free(alg_key);
2328                 return -TARGET_EFAULT;
2329             }
2330             ret = get_errno(setsockopt(sockfd, level, optname,
2331                                        alg_key, optlen));
2332             g_free(alg_key);
2333             break;
2334         }
2335         case ALG_SET_AEAD_AUTHSIZE:
2336         {
2337             ret = get_errno(setsockopt(sockfd, level, optname,
2338                                        NULL, optlen));
2339             break;
2340         }
2341         default:
2342             goto unimplemented;
2343         }
2344         break;
2345 #endif
2346     case TARGET_SOL_SOCKET:
2347         switch (optname) {
2348         case TARGET_SO_RCVTIMEO:
2349         {
2350                 struct timeval tv;
2351 
2352                 optname = SO_RCVTIMEO;
2353 
2354 set_timeout:
2355                 if (optlen != sizeof(struct target_timeval)) {
2356                     return -TARGET_EINVAL;
2357                 }
2358 
2359                 if (copy_from_user_timeval(&tv, optval_addr)) {
2360                     return -TARGET_EFAULT;
2361                 }
2362 
2363                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2364                                 &tv, sizeof(tv)));
2365                 return ret;
2366         }
2367         case TARGET_SO_SNDTIMEO:
2368                 optname = SO_SNDTIMEO;
2369                 goto set_timeout;
2370         case TARGET_SO_ATTACH_FILTER:
2371         {
2372                 struct target_sock_fprog *tfprog;
2373                 struct target_sock_filter *tfilter;
2374                 struct sock_fprog fprog;
2375                 struct sock_filter *filter;
2376                 int i;
2377 
2378                 if (optlen != sizeof(*tfprog)) {
2379                     return -TARGET_EINVAL;
2380                 }
2381                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2382                     return -TARGET_EFAULT;
2383                 }
2384                 if (!lock_user_struct(VERIFY_READ, tfilter,
2385                                       tswapal(tfprog->filter), 0)) {
2386                     unlock_user_struct(tfprog, optval_addr, 1);
2387                     return -TARGET_EFAULT;
2388                 }
2389 
2390                 fprog.len = tswap16(tfprog->len);
2391                 filter = g_try_new(struct sock_filter, fprog.len);
2392                 if (filter == NULL) {
2393                     unlock_user_struct(tfilter, tfprog->filter, 1);
2394                     unlock_user_struct(tfprog, optval_addr, 1);
2395                     return -TARGET_ENOMEM;
2396                 }
2397                 for (i = 0; i < fprog.len; i++) {
2398                     filter[i].code = tswap16(tfilter[i].code);
2399                     filter[i].jt = tfilter[i].jt;
2400                     filter[i].jf = tfilter[i].jf;
2401                     filter[i].k = tswap32(tfilter[i].k);
2402                 }
2403                 fprog.filter = filter;
2404 
2405                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2406                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2407                 g_free(filter);
2408 
2409                 unlock_user_struct(tfilter, tfprog->filter, 1);
2410                 unlock_user_struct(tfprog, optval_addr, 1);
2411                 return ret;
2412         }
2413         case TARGET_SO_BINDTODEVICE:
2414         {
2415                 char *dev_ifname, *addr_ifname;
2416 
2417                 if (optlen > IFNAMSIZ - 1) {
2418                     optlen = IFNAMSIZ - 1;
2419                 }
2420                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2421                 if (!dev_ifname) {
2422                     return -TARGET_EFAULT;
2423                 }
2424                 optname = SO_BINDTODEVICE;
2425                 addr_ifname = alloca(IFNAMSIZ);
2426                 memcpy(addr_ifname, dev_ifname, optlen);
2427                 addr_ifname[optlen] = 0;
2428                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2429                                            addr_ifname, optlen));
2430                 unlock_user(dev_ifname, optval_addr, 0);
2431                 return ret;
2432         }
2433         case TARGET_SO_LINGER:
2434         {
2435                 struct linger lg;
2436                 struct target_linger *tlg;
2437 
2438                 if (optlen != sizeof(struct target_linger)) {
2439                     return -TARGET_EINVAL;
2440                 }
2441                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2442                     return -TARGET_EFAULT;
2443                 }
2444                 __get_user(lg.l_onoff, &tlg->l_onoff);
2445                 __get_user(lg.l_linger, &tlg->l_linger);
2446                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2447                                 &lg, sizeof(lg)));
2448                 unlock_user_struct(tlg, optval_addr, 0);
2449                 return ret;
2450         }
2451         /* Options with 'int' argument.  */
2452         case TARGET_SO_DEBUG:
2453                 optname = SO_DEBUG;
2454                 break;
2455         case TARGET_SO_REUSEADDR:
2456                 optname = SO_REUSEADDR;
2457                 break;
2458 #ifdef SO_REUSEPORT
2459         case TARGET_SO_REUSEPORT:
2460                 optname = SO_REUSEPORT;
2461                 break;
2462 #endif
2463         case TARGET_SO_TYPE:
2464                 optname = SO_TYPE;
2465                 break;
2466         case TARGET_SO_ERROR:
2467                 optname = SO_ERROR;
2468                 break;
2469         case TARGET_SO_DONTROUTE:
2470                 optname = SO_DONTROUTE;
2471                 break;
2472         case TARGET_SO_BROADCAST:
2473                 optname = SO_BROADCAST;
2474                 break;
2475         case TARGET_SO_SNDBUF:
2476                 optname = SO_SNDBUF;
2477                 break;
2478         case TARGET_SO_SNDBUFFORCE:
2479                 optname = SO_SNDBUFFORCE;
2480                 break;
2481         case TARGET_SO_RCVBUF:
2482                 optname = SO_RCVBUF;
2483                 break;
2484         case TARGET_SO_RCVBUFFORCE:
2485                 optname = SO_RCVBUFFORCE;
2486                 break;
2487         case TARGET_SO_KEEPALIVE:
2488                 optname = SO_KEEPALIVE;
2489                 break;
2490         case TARGET_SO_OOBINLINE:
2491                 optname = SO_OOBINLINE;
2492                 break;
2493         case TARGET_SO_NO_CHECK:
2494                 optname = SO_NO_CHECK;
2495                 break;
2496         case TARGET_SO_PRIORITY:
2497                 optname = SO_PRIORITY;
2498                 break;
2499 #ifdef SO_BSDCOMPAT
2500         case TARGET_SO_BSDCOMPAT:
2501                 optname = SO_BSDCOMPAT;
2502                 break;
2503 #endif
2504         case TARGET_SO_PASSCRED:
2505                 optname = SO_PASSCRED;
2506                 break;
2507         case TARGET_SO_PASSSEC:
2508                 optname = SO_PASSSEC;
2509                 break;
2510         case TARGET_SO_TIMESTAMP:
2511                 optname = SO_TIMESTAMP;
2512                 break;
2513         case TARGET_SO_RCVLOWAT:
2514                 optname = SO_RCVLOWAT;
2515                 break;
2516         default:
2517             goto unimplemented;
2518         }
2519         if (optlen < sizeof(uint32_t))
2520             return -TARGET_EINVAL;
2521 
2522         if (get_user_u32(val, optval_addr))
2523             return -TARGET_EFAULT;
2524         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2525         break;
2526 #ifdef SOL_NETLINK
2527     case SOL_NETLINK:
2528         switch (optname) {
2529         case NETLINK_PKTINFO:
2530         case NETLINK_ADD_MEMBERSHIP:
2531         case NETLINK_DROP_MEMBERSHIP:
2532         case NETLINK_BROADCAST_ERROR:
2533         case NETLINK_NO_ENOBUFS:
2534 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2535         case NETLINK_LISTEN_ALL_NSID:
2536         case NETLINK_CAP_ACK:
2537 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2538 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2539         case NETLINK_EXT_ACK:
2540 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2541 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2542         case NETLINK_GET_STRICT_CHK:
2543 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2544             break;
2545         default:
2546             goto unimplemented;
2547         }
2548         val = 0;
2549         if (optlen < sizeof(uint32_t)) {
2550             return -TARGET_EINVAL;
2551         }
2552         if (get_user_u32(val, optval_addr)) {
2553             return -TARGET_EFAULT;
2554         }
2555         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2556                                    sizeof(val)));
2557         break;
2558 #endif /* SOL_NETLINK */
2559     default:
2560     unimplemented:
2561         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2562                       level, optname);
2563         ret = -TARGET_ENOPROTOOPT;
2564     }
2565     return ret;
2566 }
2567 
2568 /* do_getsockopt() must return target values and target errnos. */
2569 static abi_long do_getsockopt(int sockfd, int level, int optname,
2570                               abi_ulong optval_addr, abi_ulong optlen)
2571 {
2572     abi_long ret;
2573     int len, val;
2574     socklen_t lv;
2575 
2576     switch(level) {
2577     case TARGET_SOL_SOCKET:
2578         level = SOL_SOCKET;
2579         switch (optname) {
2580         /* These don't just return a single integer */
2581         case TARGET_SO_PEERNAME:
2582             goto unimplemented;
2583         case TARGET_SO_RCVTIMEO: {
2584             struct timeval tv;
2585             socklen_t tvlen;
2586 
2587             optname = SO_RCVTIMEO;
2588 
2589 get_timeout:
2590             if (get_user_u32(len, optlen)) {
2591                 return -TARGET_EFAULT;
2592             }
2593             if (len < 0) {
2594                 return -TARGET_EINVAL;
2595             }
2596 
2597             tvlen = sizeof(tv);
2598             ret = get_errno(getsockopt(sockfd, level, optname,
2599                                        &tv, &tvlen));
2600             if (ret < 0) {
2601                 return ret;
2602             }
2603             if (len > sizeof(struct target_timeval)) {
2604                 len = sizeof(struct target_timeval);
2605             }
2606             if (copy_to_user_timeval(optval_addr, &tv)) {
2607                 return -TARGET_EFAULT;
2608             }
2609             if (put_user_u32(len, optlen)) {
2610                 return -TARGET_EFAULT;
2611             }
2612             break;
2613         }
2614         case TARGET_SO_SNDTIMEO:
2615             optname = SO_SNDTIMEO;
2616             goto get_timeout;
2617         case TARGET_SO_PEERCRED: {
2618             struct ucred cr;
2619             socklen_t crlen;
2620             struct target_ucred *tcr;
2621 
2622             if (get_user_u32(len, optlen)) {
2623                 return -TARGET_EFAULT;
2624             }
2625             if (len < 0) {
2626                 return -TARGET_EINVAL;
2627             }
2628 
2629             crlen = sizeof(cr);
2630             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2631                                        &cr, &crlen));
2632             if (ret < 0) {
2633                 return ret;
2634             }
2635             if (len > crlen) {
2636                 len = crlen;
2637             }
2638             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2639                 return -TARGET_EFAULT;
2640             }
2641             __put_user(cr.pid, &tcr->pid);
2642             __put_user(cr.uid, &tcr->uid);
2643             __put_user(cr.gid, &tcr->gid);
2644             unlock_user_struct(tcr, optval_addr, 1);
2645             if (put_user_u32(len, optlen)) {
2646                 return -TARGET_EFAULT;
2647             }
2648             break;
2649         }
2650         case TARGET_SO_PEERSEC: {
2651             char *name;
2652 
2653             if (get_user_u32(len, optlen)) {
2654                 return -TARGET_EFAULT;
2655             }
2656             if (len < 0) {
2657                 return -TARGET_EINVAL;
2658             }
2659             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2660             if (!name) {
2661                 return -TARGET_EFAULT;
2662             }
2663             lv = len;
2664             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2665                                        name, &lv));
2666             if (put_user_u32(lv, optlen)) {
2667                 ret = -TARGET_EFAULT;
2668             }
2669             unlock_user(name, optval_addr, lv);
2670             break;
2671         }
2672         case TARGET_SO_LINGER:
2673         {
2674             struct linger lg;
2675             socklen_t lglen;
2676             struct target_linger *tlg;
2677 
2678             if (get_user_u32(len, optlen)) {
2679                 return -TARGET_EFAULT;
2680             }
2681             if (len < 0) {
2682                 return -TARGET_EINVAL;
2683             }
2684 
2685             lglen = sizeof(lg);
2686             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2687                                        &lg, &lglen));
2688             if (ret < 0) {
2689                 return ret;
2690             }
2691             if (len > lglen) {
2692                 len = lglen;
2693             }
2694             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2695                 return -TARGET_EFAULT;
2696             }
2697             __put_user(lg.l_onoff, &tlg->l_onoff);
2698             __put_user(lg.l_linger, &tlg->l_linger);
2699             unlock_user_struct(tlg, optval_addr, 1);
2700             if (put_user_u32(len, optlen)) {
2701                 return -TARGET_EFAULT;
2702             }
2703             break;
2704         }
2705         /* Options with 'int' argument.  */
2706         case TARGET_SO_DEBUG:
2707             optname = SO_DEBUG;
2708             goto int_case;
2709         case TARGET_SO_REUSEADDR:
2710             optname = SO_REUSEADDR;
2711             goto int_case;
2712 #ifdef SO_REUSEPORT
2713         case TARGET_SO_REUSEPORT:
2714             optname = SO_REUSEPORT;
2715             goto int_case;
2716 #endif
2717         case TARGET_SO_TYPE:
2718             optname = SO_TYPE;
2719             goto int_case;
2720         case TARGET_SO_ERROR:
2721             optname = SO_ERROR;
2722             goto int_case;
2723         case TARGET_SO_DONTROUTE:
2724             optname = SO_DONTROUTE;
2725             goto int_case;
2726         case TARGET_SO_BROADCAST:
2727             optname = SO_BROADCAST;
2728             goto int_case;
2729         case TARGET_SO_SNDBUF:
2730             optname = SO_SNDBUF;
2731             goto int_case;
2732         case TARGET_SO_RCVBUF:
2733             optname = SO_RCVBUF;
2734             goto int_case;
2735         case TARGET_SO_KEEPALIVE:
2736             optname = SO_KEEPALIVE;
2737             goto int_case;
2738         case TARGET_SO_OOBINLINE:
2739             optname = SO_OOBINLINE;
2740             goto int_case;
2741         case TARGET_SO_NO_CHECK:
2742             optname = SO_NO_CHECK;
2743             goto int_case;
2744         case TARGET_SO_PRIORITY:
2745             optname = SO_PRIORITY;
2746             goto int_case;
2747 #ifdef SO_BSDCOMPAT
2748         case TARGET_SO_BSDCOMPAT:
2749             optname = SO_BSDCOMPAT;
2750             goto int_case;
2751 #endif
2752         case TARGET_SO_PASSCRED:
2753             optname = SO_PASSCRED;
2754             goto int_case;
2755         case TARGET_SO_TIMESTAMP:
2756             optname = SO_TIMESTAMP;
2757             goto int_case;
2758         case TARGET_SO_RCVLOWAT:
2759             optname = SO_RCVLOWAT;
2760             goto int_case;
2761         case TARGET_SO_ACCEPTCONN:
2762             optname = SO_ACCEPTCONN;
2763             goto int_case;
2764         case TARGET_SO_PROTOCOL:
2765             optname = SO_PROTOCOL;
2766             goto int_case;
2767         case TARGET_SO_DOMAIN:
2768             optname = SO_DOMAIN;
2769             goto int_case;
2770         default:
2771             goto int_case;
2772         }
2773         break;
2774     case SOL_TCP:
2775     case SOL_UDP:
2776         /* TCP and UDP options all take an 'int' value.  */
2777     int_case:
2778         if (get_user_u32(len, optlen))
2779             return -TARGET_EFAULT;
2780         if (len < 0)
2781             return -TARGET_EINVAL;
2782         lv = sizeof(lv);
2783         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2784         if (ret < 0)
2785             return ret;
2786         switch (optname) {
2787         case SO_TYPE:
2788             val = host_to_target_sock_type(val);
2789             break;
2790         case SO_ERROR:
2791             val = host_to_target_errno(val);
2792             break;
2793         }
2794         if (len > lv)
2795             len = lv;
2796         if (len == 4) {
2797             if (put_user_u32(val, optval_addr))
2798                 return -TARGET_EFAULT;
2799         } else {
2800             if (put_user_u8(val, optval_addr))
2801                 return -TARGET_EFAULT;
2802         }
2803         if (put_user_u32(len, optlen))
2804             return -TARGET_EFAULT;
2805         break;
2806     case SOL_IP:
2807         switch(optname) {
2808         case IP_TOS:
2809         case IP_TTL:
2810         case IP_HDRINCL:
2811         case IP_ROUTER_ALERT:
2812         case IP_RECVOPTS:
2813         case IP_RETOPTS:
2814         case IP_PKTINFO:
2815         case IP_MTU_DISCOVER:
2816         case IP_RECVERR:
2817         case IP_RECVTOS:
2818 #ifdef IP_FREEBIND
2819         case IP_FREEBIND:
2820 #endif
2821         case IP_MULTICAST_TTL:
2822         case IP_MULTICAST_LOOP:
2823             if (get_user_u32(len, optlen))
2824                 return -TARGET_EFAULT;
2825             if (len < 0)
2826                 return -TARGET_EINVAL;
2827             lv = sizeof(lv);
2828             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2829             if (ret < 0)
2830                 return ret;
2831             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2832                 len = 1;
2833                 if (put_user_u32(len, optlen)
2834                     || put_user_u8(val, optval_addr))
2835                     return -TARGET_EFAULT;
2836             } else {
2837                 if (len > sizeof(int))
2838                     len = sizeof(int);
2839                 if (put_user_u32(len, optlen)
2840                     || put_user_u32(val, optval_addr))
2841                     return -TARGET_EFAULT;
2842             }
2843             break;
2844         default:
2845             ret = -TARGET_ENOPROTOOPT;
2846             break;
2847         }
2848         break;
2849     case SOL_IPV6:
2850         switch (optname) {
2851         case IPV6_MTU_DISCOVER:
2852         case IPV6_MTU:
2853         case IPV6_V6ONLY:
2854         case IPV6_RECVPKTINFO:
2855         case IPV6_UNICAST_HOPS:
2856         case IPV6_MULTICAST_HOPS:
2857         case IPV6_MULTICAST_LOOP:
2858         case IPV6_RECVERR:
2859         case IPV6_RECVHOPLIMIT:
2860         case IPV6_2292HOPLIMIT:
2861         case IPV6_CHECKSUM:
2862         case IPV6_ADDRFORM:
2863         case IPV6_2292PKTINFO:
2864         case IPV6_RECVTCLASS:
2865         case IPV6_RECVRTHDR:
2866         case IPV6_2292RTHDR:
2867         case IPV6_RECVHOPOPTS:
2868         case IPV6_2292HOPOPTS:
2869         case IPV6_RECVDSTOPTS:
2870         case IPV6_2292DSTOPTS:
2871         case IPV6_TCLASS:
2872         case IPV6_ADDR_PREFERENCES:
2873 #ifdef IPV6_RECVPATHMTU
2874         case IPV6_RECVPATHMTU:
2875 #endif
2876 #ifdef IPV6_TRANSPARENT
2877         case IPV6_TRANSPARENT:
2878 #endif
2879 #ifdef IPV6_FREEBIND
2880         case IPV6_FREEBIND:
2881 #endif
2882 #ifdef IPV6_RECVORIGDSTADDR
2883         case IPV6_RECVORIGDSTADDR:
2884 #endif
2885             if (get_user_u32(len, optlen))
2886                 return -TARGET_EFAULT;
2887             if (len < 0)
2888                 return -TARGET_EINVAL;
2889             lv = sizeof(lv);
2890             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2891             if (ret < 0)
2892                 return ret;
2893             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2894                 len = 1;
2895                 if (put_user_u32(len, optlen)
2896                     || put_user_u8(val, optval_addr))
2897                     return -TARGET_EFAULT;
2898             } else {
2899                 if (len > sizeof(int))
2900                     len = sizeof(int);
2901                 if (put_user_u32(len, optlen)
2902                     || put_user_u32(val, optval_addr))
2903                     return -TARGET_EFAULT;
2904             }
2905             break;
2906         default:
2907             ret = -TARGET_ENOPROTOOPT;
2908             break;
2909         }
2910         break;
2911 #ifdef SOL_NETLINK
2912     case SOL_NETLINK:
2913         switch (optname) {
2914         case NETLINK_PKTINFO:
2915         case NETLINK_BROADCAST_ERROR:
2916         case NETLINK_NO_ENOBUFS:
2917 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2918         case NETLINK_LISTEN_ALL_NSID:
2919         case NETLINK_CAP_ACK:
2920 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2921 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2922         case NETLINK_EXT_ACK:
2923 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2924 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2925         case NETLINK_GET_STRICT_CHK:
2926 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2927             if (get_user_u32(len, optlen)) {
2928                 return -TARGET_EFAULT;
2929             }
2930             if (len != sizeof(val)) {
2931                 return -TARGET_EINVAL;
2932             }
2933             lv = len;
2934             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2935             if (ret < 0) {
2936                 return ret;
2937             }
2938             if (put_user_u32(lv, optlen)
2939                 || put_user_u32(val, optval_addr)) {
2940                 return -TARGET_EFAULT;
2941             }
2942             break;
2943 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2944         case NETLINK_LIST_MEMBERSHIPS:
2945         {
2946             uint32_t *results;
2947             int i;
2948             if (get_user_u32(len, optlen)) {
2949                 return -TARGET_EFAULT;
2950             }
2951             if (len < 0) {
2952                 return -TARGET_EINVAL;
2953             }
2954             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2955             if (!results && len > 0) {
2956                 return -TARGET_EFAULT;
2957             }
2958             lv = len;
2959             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2960             if (ret < 0) {
2961                 unlock_user(results, optval_addr, 0);
2962                 return ret;
2963             }
2964             /* swap host endianness to target endianness. */
2965             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2966                 results[i] = tswap32(results[i]);
2967             }
2968             if (put_user_u32(lv, optlen)) {
2969                 return -TARGET_EFAULT;
2970             }
2971             unlock_user(results, optval_addr, 0);
2972             break;
2973         }
2974 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2975         default:
2976             goto unimplemented;
2977         }
2978         break;
2979 #endif /* SOL_NETLINK */
2980     default:
2981     unimplemented:
2982         qemu_log_mask(LOG_UNIMP,
2983                       "getsockopt level=%d optname=%d not yet supported\n",
2984                       level, optname);
2985         ret = -TARGET_EOPNOTSUPP;
2986         break;
2987     }
2988     return ret;
2989 }
2990 
2991 /* Convert target low/high pair representing file offset into the host
2992  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2993  * as the kernel doesn't handle them either.
2994  */
2995 static void target_to_host_low_high(abi_ulong tlow,
2996                                     abi_ulong thigh,
2997                                     unsigned long *hlow,
2998                                     unsigned long *hhigh)
2999 {
3000     uint64_t off = tlow |
3001         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3002         TARGET_LONG_BITS / 2;
3003 
3004     *hlow = off;
3005     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3006 }
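
/*
 * Worked example with hypothetical values: for a 32-bit guest,
 * tlow = 0x89abcdef and thigh = 0x01234567 recombine into
 * off = 0x0123456789abcdef.  A 64-bit host then gets
 * *hlow = 0x0123456789abcdef and *hhigh = 0, while a 32-bit host gets the
 * pair split back into *hlow = 0x89abcdef and *hhigh = 0x01234567.
 */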
3007 
3008 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3009                                 abi_ulong count, int copy)
3010 {
3011     struct target_iovec *target_vec;
3012     struct iovec *vec;
3013     abi_ulong total_len, max_len;
3014     int i;
3015     int err = 0;
3016     bool bad_address = false;
3017 
3018     if (count == 0) {
3019         errno = 0;
3020         return NULL;
3021     }
3022     if (count > IOV_MAX) {
3023         errno = EINVAL;
3024         return NULL;
3025     }
3026 
3027     vec = g_try_new0(struct iovec, count);
3028     if (vec == NULL) {
3029         errno = ENOMEM;
3030         return NULL;
3031     }
3032 
3033     target_vec = lock_user(VERIFY_READ, target_addr,
3034                            count * sizeof(struct target_iovec), 1);
3035     if (target_vec == NULL) {
3036         err = EFAULT;
3037         goto fail2;
3038     }
3039 
3040     /* ??? If host page size > target page size, this will result in a
3041        value larger than what we can actually support.  */
3042     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3043     total_len = 0;
3044 
3045     for (i = 0; i < count; i++) {
3046         abi_ulong base = tswapal(target_vec[i].iov_base);
3047         abi_long len = tswapal(target_vec[i].iov_len);
3048 
3049         if (len < 0) {
3050             err = EINVAL;
3051             goto fail;
3052         } else if (len == 0) {
3053             /* Zero length pointer is ignored.  */
3054             vec[i].iov_base = 0;
3055         } else {
3056             vec[i].iov_base = lock_user(type, base, len, copy);
3057             /* If the first buffer pointer is bad, this is a fault.  But
3058              * subsequent bad buffers will result in a partial write; this
3059              * is realized by filling the vector with null pointers and
3060              * zero lengths. */
3061             if (!vec[i].iov_base) {
3062                 if (i == 0) {
3063                     err = EFAULT;
3064                     goto fail;
3065                 } else {
3066                     bad_address = true;
3067                 }
3068             }
3069             if (bad_address) {
3070                 len = 0;
3071             }
3072             if (len > max_len - total_len) {
3073                 len = max_len - total_len;
3074             }
3075         }
3076         vec[i].iov_len = len;
3077         total_len += len;
3078     }
3079 
3080     unlock_user(target_vec, target_addr, 0);
3081     return vec;
3082 
3083  fail:
3084     while (--i >= 0) {
3085         if (tswapal(target_vec[i].iov_len) > 0) {
3086             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3087         }
3088     }
3089     unlock_user(target_vec, target_addr, 0);
3090  fail2:
3091     g_free(vec);
3092     errno = err;
3093     return NULL;
3094 }
3095 
3096 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3097                          abi_ulong count, int copy)
3098 {
3099     struct target_iovec *target_vec;
3100     int i;
3101 
3102     target_vec = lock_user(VERIFY_READ, target_addr,
3103                            count * sizeof(struct target_iovec), 1);
3104     if (target_vec) {
3105         for (i = 0; i < count; i++) {
3106             abi_ulong base = tswapal(target_vec[i].iov_base);
3107             abi_long len = tswapal(target_vec[i].iov_len);
3108             if (len < 0) {
3109                 break;
3110             }
3111             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3112         }
3113         unlock_user(target_vec, target_addr, 0);
3114     }
3115 
3116     g_free(vec);
3117 }
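
/*
 * Minimal usage sketch (illustration, not emulation code): a readv-style
 * wrapper would typically pair the two helpers like this, copying guest
 * data back on unlock only for the read direction.  "fd", "vec_addr",
 * "count" and "ret" are placeholder names.
 *
 *     struct iovec *vec = lock_iovec(VERIFY_WRITE, vec_addr, count, 0);
 *     if (vec != NULL) {
 *         ret = get_errno(safe_readv(fd, vec, count));
 *         unlock_iovec(vec, vec_addr, count, 1);
 *     } else {
 *         ret = -host_to_target_errno(errno);
 *     }
 */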
3118 
3119 static inline int target_to_host_sock_type(int *type)
3120 {
3121     int host_type = 0;
3122     int target_type = *type;
3123 
3124     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3125     case TARGET_SOCK_DGRAM:
3126         host_type = SOCK_DGRAM;
3127         break;
3128     case TARGET_SOCK_STREAM:
3129         host_type = SOCK_STREAM;
3130         break;
3131     default:
3132         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3133         break;
3134     }
3135     if (target_type & TARGET_SOCK_CLOEXEC) {
3136 #if defined(SOCK_CLOEXEC)
3137         host_type |= SOCK_CLOEXEC;
3138 #else
3139         return -TARGET_EINVAL;
3140 #endif
3141     }
3142     if (target_type & TARGET_SOCK_NONBLOCK) {
3143 #if defined(SOCK_NONBLOCK)
3144         host_type |= SOCK_NONBLOCK;
3145 #elif !defined(O_NONBLOCK)
3146         return -TARGET_EINVAL;
3147 #endif
3148     }
3149     *type = host_type;
3150     return 0;
3151 }
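
/*
 * Example (a sketch; assumes the host defines SOCK_NONBLOCK):
 *
 *     int type = TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK;
 *     if (target_to_host_sock_type(&type) == 0) {
 *         // type is now SOCK_STREAM | SOCK_NONBLOCK
 *     }
 *
 * On hosts without SOCK_NONBLOCK but with O_NONBLOCK, the flag is left
 * for sock_flags_fixup() below to emulate via fcntl().
 */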
3152 
3153 /* Try to emulate socket type flags after socket creation.  */
3154 static int sock_flags_fixup(int fd, int target_type)
3155 {
3156 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3157     if (target_type & TARGET_SOCK_NONBLOCK) {
3158         int flags = fcntl(fd, F_GETFL);
3159         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3160             close(fd);
3161             return -TARGET_EINVAL;
3162         }
3163     }
3164 #endif
3165     return fd;
3166 }
3167 
3168 /* do_socket() Must return target values and target errnos. */
3169 static abi_long do_socket(int domain, int type, int protocol)
3170 {
3171     int target_type = type;
3172     int ret;
3173 
3174     ret = target_to_host_sock_type(&type);
3175     if (ret) {
3176         return ret;
3177     }
3178 
3179     if (domain == PF_NETLINK && !(
3180 #ifdef CONFIG_RTNETLINK
3181          protocol == NETLINK_ROUTE ||
3182 #endif
3183          protocol == NETLINK_KOBJECT_UEVENT ||
3184          protocol == NETLINK_AUDIT)) {
3185         return -TARGET_EPROTONOSUPPORT;
3186     }
3187 
3188     if (domain == AF_PACKET ||
3189         (domain == AF_INET && type == SOCK_PACKET)) {
3190         protocol = tswap16(protocol);
3191     }
3192 
3193     ret = get_errno(socket(domain, type, protocol));
3194     if (ret >= 0) {
3195         ret = sock_flags_fixup(ret, target_type);
3196         if (type == SOCK_PACKET) {
3197             /* Handle the obsolete case: if the socket type is
3198              * SOCK_PACKET, bind by name.
3199              */
3200             fd_trans_register(ret, &target_packet_trans);
3201         } else if (domain == PF_NETLINK) {
3202             switch (protocol) {
3203 #ifdef CONFIG_RTNETLINK
3204             case NETLINK_ROUTE:
3205                 fd_trans_register(ret, &target_netlink_route_trans);
3206                 break;
3207 #endif
3208             case NETLINK_KOBJECT_UEVENT:
3209                 /* nothing to do: messages are strings */
3210                 break;
3211             case NETLINK_AUDIT:
3212                 fd_trans_register(ret, &target_netlink_audit_trans);
3213                 break;
3214             default:
3215                 g_assert_not_reached();
3216             }
3217         }
3218     }
3219     return ret;
3220 }
3221 
3222 /* do_bind() Must return target values and target errnos. */
3223 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3224                         socklen_t addrlen)
3225 {
3226     void *addr;
3227     abi_long ret;
3228 
3229     if ((int)addrlen < 0) {
3230         return -TARGET_EINVAL;
3231     }
3232 
3233     addr = alloca(addrlen+1);
3234 
3235     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3236     if (ret)
3237         return ret;
3238 
3239     return get_errno(bind(sockfd, addr, addrlen));
3240 }
3241 
3242 /* do_connect() Must return target values and target errnos. */
3243 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3244                            socklen_t addrlen)
3245 {
3246     void *addr;
3247     abi_long ret;
3248 
3249     if ((int)addrlen < 0) {
3250         return -TARGET_EINVAL;
3251     }
3252 
3253     addr = alloca(addrlen+1);
3254 
3255     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3256     if (ret)
3257         return ret;
3258 
3259     return get_errno(safe_connect(sockfd, addr, addrlen));
3260 }
3261 
3262 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3263 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3264                                       int flags, int send)
3265 {
3266     abi_long ret, len;
3267     struct msghdr msg;
3268     abi_ulong count;
3269     struct iovec *vec;
3270     abi_ulong target_vec;
3271 
3272     if (msgp->msg_name) {
3273         msg.msg_namelen = tswap32(msgp->msg_namelen);
3274         msg.msg_name = alloca(msg.msg_namelen+1);
3275         ret = target_to_host_sockaddr(fd, msg.msg_name,
3276                                       tswapal(msgp->msg_name),
3277                                       msg.msg_namelen);
3278         if (ret == -TARGET_EFAULT) {
3279             /* For connected sockets msg_name and msg_namelen must
3280              * be ignored, so returning EFAULT immediately is wrong.
3281              * Instead, pass a bad msg_name to the host kernel, and
3282              * let it decide whether to return EFAULT or not.
3283              */
3284             msg.msg_name = (void *)-1;
3285         } else if (ret) {
3286             goto out2;
3287         }
3288     } else {
3289         msg.msg_name = NULL;
3290         msg.msg_namelen = 0;
3291     }
3292     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3293     msg.msg_control = alloca(msg.msg_controllen);
3294     memset(msg.msg_control, 0, msg.msg_controllen);
3295 
3296     msg.msg_flags = tswap32(msgp->msg_flags);
3297 
3298     count = tswapal(msgp->msg_iovlen);
3299     target_vec = tswapal(msgp->msg_iov);
3300 
3301     if (count > IOV_MAX) {
3302         /* sendmsg/recvmsg return a different errno for this condition than
3303          * readv/writev, so we must catch it here before lock_iovec() does.
3304          */
3305         ret = -TARGET_EMSGSIZE;
3306         goto out2;
3307     }
3308 
3309     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3310                      target_vec, count, send);
3311     if (vec == NULL) {
3312         ret = -host_to_target_errno(errno);
3313         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3314         if (!send || ret) {
3315             goto out2;
3316         }
3317     }
3318     msg.msg_iovlen = count;
3319     msg.msg_iov = vec;
3320 
3321     if (send) {
3322         if (fd_trans_target_to_host_data(fd)) {
3323             void *host_msg;
3324 
3325             host_msg = g_malloc(msg.msg_iov->iov_len);
3326             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3327             ret = fd_trans_target_to_host_data(fd)(host_msg,
3328                                                    msg.msg_iov->iov_len);
3329             if (ret >= 0) {
3330                 msg.msg_iov->iov_base = host_msg;
3331                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3332             }
3333             g_free(host_msg);
3334         } else {
3335             ret = target_to_host_cmsg(&msg, msgp);
3336             if (ret == 0) {
3337                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3338             }
3339         }
3340     } else {
3341         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3342         if (!is_error(ret)) {
3343             len = ret;
3344             if (fd_trans_host_to_target_data(fd)) {
3345                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3346                                                MIN(msg.msg_iov->iov_len, len));
3347             }
3348             if (!is_error(ret)) {
3349                 ret = host_to_target_cmsg(msgp, &msg);
3350             }
3351             if (!is_error(ret)) {
3352                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3353                 msgp->msg_flags = tswap32(msg.msg_flags);
3354                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3355                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3356                                     msg.msg_name, msg.msg_namelen);
3357                     if (ret) {
3358                         goto out;
3359                     }
3360                 }
3361 
3362                 ret = len;
3363             }
3364         }
3365     }
3366 
3367 out:
3368     if (vec) {
3369         unlock_iovec(vec, target_vec, count, !send);
3370     }
3371 out2:
3372     return ret;
3373 }
3374 
3375 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3376                                int flags, int send)
3377 {
3378     abi_long ret;
3379     struct target_msghdr *msgp;
3380 
3381     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3382                           msgp,
3383                           target_msg,
3384                           send ? 1 : 0)) {
3385         return -TARGET_EFAULT;
3386     }
3387     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3388     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3389     return ret;
3390 }
3391 
3392 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3393  * so it might not have this *mmsg-specific flag either.
3394  */
3395 #ifndef MSG_WAITFORONE
3396 #define MSG_WAITFORONE 0x10000
3397 #endif
3398 
3399 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3400                                 unsigned int vlen, unsigned int flags,
3401                                 int send)
3402 {
3403     struct target_mmsghdr *mmsgp;
3404     abi_long ret = 0;
3405     int i;
3406 
3407     if (vlen > UIO_MAXIOV) {
3408         vlen = UIO_MAXIOV;
3409     }
3410 
3411     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3412     if (!mmsgp) {
3413         return -TARGET_EFAULT;
3414     }
3415 
3416     for (i = 0; i < vlen; i++) {
3417         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3418         if (is_error(ret)) {
3419             break;
3420         }
3421         mmsgp[i].msg_len = tswap32(ret);
3422         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3423         if (flags & MSG_WAITFORONE) {
3424             flags |= MSG_DONTWAIT;
3425         }
3426     }
3427 
3428     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3429 
3430     /* Return number of datagrams sent if we sent any at all;
3431      * otherwise return the error.
3432      */
3433     if (i) {
3434         return i;
3435     }
3436     return ret;
3437 }
3438 
3439 /* do_accept4() Must return target values and target errnos. */
3440 static abi_long do_accept4(int fd, abi_ulong target_addr,
3441                            abi_ulong target_addrlen_addr, int flags)
3442 {
3443     socklen_t addrlen, ret_addrlen;
3444     void *addr;
3445     abi_long ret;
3446     int host_flags;
3447 
3448     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3449         return -TARGET_EINVAL;
3450     }
3451 
3452     host_flags = 0;
3453     if (flags & TARGET_SOCK_NONBLOCK) {
3454         host_flags |= SOCK_NONBLOCK;
3455     }
3456     if (flags & TARGET_SOCK_CLOEXEC) {
3457         host_flags |= SOCK_CLOEXEC;
3458     }
3459 
3460     if (target_addr == 0) {
3461         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3462     }
3463 
3464     /* Linux returns EFAULT if the addrlen pointer is invalid */
3465     if (get_user_u32(addrlen, target_addrlen_addr))
3466         return -TARGET_EFAULT;
3467 
3468     if ((int)addrlen < 0) {
3469         return -TARGET_EINVAL;
3470     }
3471 
3472     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3473         return -TARGET_EFAULT;
3474     }
3475 
3476     addr = alloca(addrlen);
3477 
3478     ret_addrlen = addrlen;
3479     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3480     if (!is_error(ret)) {
3481         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3482         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3483             ret = -TARGET_EFAULT;
3484         }
3485     }
3486     return ret;
3487 }
3488 
3489 /* do_getpeername() Must return target values and target errnos. */
3490 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3491                                abi_ulong target_addrlen_addr)
3492 {
3493     socklen_t addrlen, ret_addrlen;
3494     void *addr;
3495     abi_long ret;
3496 
3497     if (get_user_u32(addrlen, target_addrlen_addr))
3498         return -TARGET_EFAULT;
3499 
3500     if ((int)addrlen < 0) {
3501         return -TARGET_EINVAL;
3502     }
3503 
3504     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3505         return -TARGET_EFAULT;
3506     }
3507 
3508     addr = alloca(addrlen);
3509 
3510     ret_addrlen = addrlen;
3511     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3512     if (!is_error(ret)) {
3513         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3514         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3515             ret = -TARGET_EFAULT;
3516         }
3517     }
3518     return ret;
3519 }
3520 
3521 /* do_getsockname() Must return target values and target errnos. */
3522 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3523                                abi_ulong target_addrlen_addr)
3524 {
3525     socklen_t addrlen, ret_addrlen;
3526     void *addr;
3527     abi_long ret;
3528 
3529     if (get_user_u32(addrlen, target_addrlen_addr))
3530         return -TARGET_EFAULT;
3531 
3532     if ((int)addrlen < 0) {
3533         return -TARGET_EINVAL;
3534     }
3535 
3536     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3537         return -TARGET_EFAULT;
3538     }
3539 
3540     addr = alloca(addrlen);
3541 
3542     ret_addrlen = addrlen;
3543     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3544     if (!is_error(ret)) {
3545         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3546         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3547             ret = -TARGET_EFAULT;
3548         }
3549     }
3550     return ret;
3551 }
3552 
3553 /* do_socketpair() Must return target values and target errnos. */
3554 static abi_long do_socketpair(int domain, int type, int protocol,
3555                               abi_ulong target_tab_addr)
3556 {
3557     int tab[2];
3558     abi_long ret;
3559 
3560     target_to_host_sock_type(&type);
3561 
3562     ret = get_errno(socketpair(domain, type, protocol, tab));
3563     if (!is_error(ret)) {
3564         if (put_user_s32(tab[0], target_tab_addr)
3565             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3566             ret = -TARGET_EFAULT;
3567     }
3568     return ret;
3569 }
3570 
3571 /* do_sendto() Must return target values and target errnos. */
3572 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3573                           abi_ulong target_addr, socklen_t addrlen)
3574 {
3575     void *addr;
3576     void *host_msg;
3577     void *copy_msg = NULL;
3578     abi_long ret;
3579 
3580     if ((int)addrlen < 0) {
3581         return -TARGET_EINVAL;
3582     }
3583 
3584     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3585     if (!host_msg)
3586         return -TARGET_EFAULT;
3587     if (fd_trans_target_to_host_data(fd)) {
3588         copy_msg = host_msg;
3589         host_msg = g_malloc(len);
3590         memcpy(host_msg, copy_msg, len);
3591         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3592         if (ret < 0) {
3593             goto fail;
3594         }
3595     }
3596     if (target_addr) {
3597         addr = alloca(addrlen+1);
3598         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3599         if (ret) {
3600             goto fail;
3601         }
3602         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3603     } else {
3604         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3605     }
3606 fail:
3607     if (copy_msg) {
3608         g_free(host_msg);
3609         host_msg = copy_msg;
3610     }
3611     unlock_user(host_msg, msg, 0);
3612     return ret;
3613 }
3614 
3615 /* do_recvfrom() Must return target values and target errnos. */
3616 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3617                             abi_ulong target_addr,
3618                             abi_ulong target_addrlen)
3619 {
3620     socklen_t addrlen, ret_addrlen;
3621     void *addr;
3622     void *host_msg;
3623     abi_long ret;
3624 
3625     if (!msg) {
3626         host_msg = NULL;
3627     } else {
3628         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3629         if (!host_msg) {
3630             return -TARGET_EFAULT;
3631         }
3632     }
3633     if (target_addr) {
3634         if (get_user_u32(addrlen, target_addrlen)) {
3635             ret = -TARGET_EFAULT;
3636             goto fail;
3637         }
3638         if ((int)addrlen < 0) {
3639             ret = -TARGET_EINVAL;
3640             goto fail;
3641         }
3642         addr = alloca(addrlen);
3643         ret_addrlen = addrlen;
3644         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3645                                       addr, &ret_addrlen));
3646     } else {
3647         addr = NULL; /* To keep compiler quiet.  */
3648         addrlen = 0; /* To keep compiler quiet.  */
3649         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3650     }
3651     if (!is_error(ret)) {
3652         if (fd_trans_host_to_target_data(fd)) {
3653             abi_long trans;
3654             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3655             if (is_error(trans)) {
3656                 ret = trans;
3657                 goto fail;
3658             }
3659         }
3660         if (target_addr) {
3661             host_to_target_sockaddr(target_addr, addr,
3662                                     MIN(addrlen, ret_addrlen));
3663             if (put_user_u32(ret_addrlen, target_addrlen)) {
3664                 ret = -TARGET_EFAULT;
3665                 goto fail;
3666             }
3667         }
3668         unlock_user(host_msg, msg, len);
3669     } else {
3670 fail:
3671         unlock_user(host_msg, msg, 0);
3672     }
3673     return ret;
3674 }
3675 
3676 #ifdef TARGET_NR_socketcall
3677 /* do_socketcall() must return target values and target errnos. */
3678 static abi_long do_socketcall(int num, abi_ulong vptr)
3679 {
3680     static const unsigned nargs[] = { /* number of arguments per operation */
3681         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3682         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3683         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3684         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3685         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3686         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3687         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3688         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3689         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3690         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3691         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3692         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3693         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3694         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3695         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3696         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3697         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3698         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3699         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3700         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3701     };
3702     abi_long a[6]; /* max 6 args */
3703     unsigned i;
3704 
3705     /* check the range of the first argument num */
3706     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3707     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3708         return -TARGET_EINVAL;
3709     }
3710     /* ensure we have space for args */
3711     if (nargs[num] > ARRAY_SIZE(a)) {
3712         return -TARGET_EINVAL;
3713     }
3714     /* collect the arguments in a[] according to nargs[] */
3715     for (i = 0; i < nargs[num]; ++i) {
3716         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3717             return -TARGET_EFAULT;
3718         }
3719     }
3720     /* now that we have the args, invoke the appropriate underlying function */
3721     switch (num) {
3722     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3723         return do_socket(a[0], a[1], a[2]);
3724     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3725         return do_bind(a[0], a[1], a[2]);
3726     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3727         return do_connect(a[0], a[1], a[2]);
3728     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3729         return get_errno(listen(a[0], a[1]));
3730     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3731         return do_accept4(a[0], a[1], a[2], 0);
3732     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3733         return do_getsockname(a[0], a[1], a[2]);
3734     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3735         return do_getpeername(a[0], a[1], a[2]);
3736     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3737         return do_socketpair(a[0], a[1], a[2], a[3]);
3738     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3739         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3740     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3741         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3742     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3743         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3744     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3745         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3746     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3747         return get_errno(shutdown(a[0], a[1]));
3748     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3749         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3750     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3751         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3752     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3753         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3754     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3755         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3756     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3757         return do_accept4(a[0], a[1], a[2], a[3]);
3758     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3759         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3760     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3761         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3762     default:
3763         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3764         return -TARGET_EINVAL;
3765     }
3766 }
3767 #endif
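
/*
 * Illustration of the socketcall funnelling above (guest-side view,
 * argument names are placeholders): a guest connect() on such targets is
 * roughly
 *
 *     abi_long args[3] = { sockfd, addr_ptr, addrlen };  // in guest memory
 *     socketcall(TARGET_SYS_CONNECT, args);
 *
 * and the loop above fetches the three abi_longs from guest memory with
 * get_user_ual() before dispatching to do_connect().
 */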
3768 
3769 #define N_SHM_REGIONS	32
3770 
3771 static struct shm_region {
3772     abi_ulong start;
3773     abi_ulong size;
3774     bool in_use;
3775 } shm_regions[N_SHM_REGIONS];
3776 
3777 #ifndef TARGET_SEMID64_DS
3778 /* asm-generic version of this struct */
3779 struct target_semid64_ds
3780 {
3781   struct target_ipc_perm sem_perm;
3782   abi_ulong sem_otime;
3783 #if TARGET_ABI_BITS == 32
3784   abi_ulong __unused1;
3785 #endif
3786   abi_ulong sem_ctime;
3787 #if TARGET_ABI_BITS == 32
3788   abi_ulong __unused2;
3789 #endif
3790   abi_ulong sem_nsems;
3791   abi_ulong __unused3;
3792   abi_ulong __unused4;
3793 };
3794 #endif
3795 
3796 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3797                                                abi_ulong target_addr)
3798 {
3799     struct target_ipc_perm *target_ip;
3800     struct target_semid64_ds *target_sd;
3801 
3802     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3803         return -TARGET_EFAULT;
3804     target_ip = &(target_sd->sem_perm);
3805     host_ip->__key = tswap32(target_ip->__key);
3806     host_ip->uid = tswap32(target_ip->uid);
3807     host_ip->gid = tswap32(target_ip->gid);
3808     host_ip->cuid = tswap32(target_ip->cuid);
3809     host_ip->cgid = tswap32(target_ip->cgid);
3810 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3811     host_ip->mode = tswap32(target_ip->mode);
3812 #else
3813     host_ip->mode = tswap16(target_ip->mode);
3814 #endif
3815 #if defined(TARGET_PPC)
3816     host_ip->__seq = tswap32(target_ip->__seq);
3817 #else
3818     host_ip->__seq = tswap16(target_ip->__seq);
3819 #endif
3820     unlock_user_struct(target_sd, target_addr, 0);
3821     return 0;
3822 }
3823 
3824 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3825                                                struct ipc_perm *host_ip)
3826 {
3827     struct target_ipc_perm *target_ip;
3828     struct target_semid64_ds *target_sd;
3829 
3830     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3831         return -TARGET_EFAULT;
3832     target_ip = &(target_sd->sem_perm);
3833     target_ip->__key = tswap32(host_ip->__key);
3834     target_ip->uid = tswap32(host_ip->uid);
3835     target_ip->gid = tswap32(host_ip->gid);
3836     target_ip->cuid = tswap32(host_ip->cuid);
3837     target_ip->cgid = tswap32(host_ip->cgid);
3838 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3839     target_ip->mode = tswap32(host_ip->mode);
3840 #else
3841     target_ip->mode = tswap16(host_ip->mode);
3842 #endif
3843 #if defined(TARGET_PPC)
3844     target_ip->__seq = tswap32(host_ip->__seq);
3845 #else
3846     target_ip->__seq = tswap16(host_ip->__seq);
3847 #endif
3848     unlock_user_struct(target_sd, target_addr, 1);
3849     return 0;
3850 }
3851 
3852 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3853                                                abi_ulong target_addr)
3854 {
3855     struct target_semid64_ds *target_sd;
3856 
3857     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3858         return -TARGET_EFAULT;
3859     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3860         return -TARGET_EFAULT;
3861     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3862     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3863     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3864     unlock_user_struct(target_sd, target_addr, 0);
3865     return 0;
3866 }
3867 
3868 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3869                                                struct semid_ds *host_sd)
3870 {
3871     struct target_semid64_ds *target_sd;
3872 
3873     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3874         return -TARGET_EFAULT;
3875     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3876         return -TARGET_EFAULT;
3877     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3878     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3879     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3880     unlock_user_struct(target_sd, target_addr, 1);
3881     return 0;
3882 }
3883 
3884 struct target_seminfo {
3885     int semmap;
3886     int semmni;
3887     int semmns;
3888     int semmnu;
3889     int semmsl;
3890     int semopm;
3891     int semume;
3892     int semusz;
3893     int semvmx;
3894     int semaem;
3895 };
3896 
3897 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3898                                               struct seminfo *host_seminfo)
3899 {
3900     struct target_seminfo *target_seminfo;
3901     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3902         return -TARGET_EFAULT;
3903     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3904     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3905     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3906     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3907     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3908     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3909     __put_user(host_seminfo->semume, &target_seminfo->semume);
3910     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3911     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3912     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3913     unlock_user_struct(target_seminfo, target_addr, 1);
3914     return 0;
3915 }
3916 
3917 union semun {
3918 	int val;
3919 	struct semid_ds *buf;
3920 	unsigned short *array;
3921 	struct seminfo *__buf;
3922 };
3923 
3924 union target_semun {
3925 	int val;
3926 	abi_ulong buf;
3927 	abi_ulong array;
3928 	abi_ulong __buf;
3929 };
3930 
3931 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3932                                                abi_ulong target_addr)
3933 {
3934     int nsems;
3935     unsigned short *array;
3936     union semun semun;
3937     struct semid_ds semid_ds;
3938     int i, ret;
3939 
3940     semun.buf = &semid_ds;
3941 
3942     ret = semctl(semid, 0, IPC_STAT, semun);
3943     if (ret == -1)
3944         return get_errno(ret);
3945 
3946     nsems = semid_ds.sem_nsems;
3947 
3948     *host_array = g_try_new(unsigned short, nsems);
3949     if (!*host_array) {
3950         return -TARGET_ENOMEM;
3951     }
3952     array = lock_user(VERIFY_READ, target_addr,
3953                       nsems*sizeof(unsigned short), 1);
3954     if (!array) {
3955         g_free(*host_array);
3956         return -TARGET_EFAULT;
3957     }
3958 
3959     for(i=0; i<nsems; i++) {
3960         __get_user((*host_array)[i], &array[i]);
3961     }
3962     unlock_user(array, target_addr, 0);
3963 
3964     return 0;
3965 }
3966 
3967 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3968                                                unsigned short **host_array)
3969 {
3970     int nsems;
3971     unsigned short *array;
3972     union semun semun;
3973     struct semid_ds semid_ds;
3974     int i, ret;
3975 
3976     semun.buf = &semid_ds;
3977 
3978     ret = semctl(semid, 0, IPC_STAT, semun);
3979     if (ret == -1)
3980         return get_errno(ret);
3981 
3982     nsems = semid_ds.sem_nsems;
3983 
3984     array = lock_user(VERIFY_WRITE, target_addr,
3985                       nsems*sizeof(unsigned short), 0);
3986     if (!array)
3987         return -TARGET_EFAULT;
3988 
3989     for(i=0; i<nsems; i++) {
3990         __put_user((*host_array)[i], &array[i]);
3991     }
3992     g_free(*host_array);
3993     unlock_user(array, target_addr, 1);
3994 
3995     return 0;
3996 }
3997 
3998 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3999                                  abi_ulong target_arg)
4000 {
4001     union target_semun target_su = { .buf = target_arg };
4002     union semun arg;
4003     struct semid_ds dsarg;
4004     unsigned short *array = NULL;
4005     struct seminfo seminfo;
4006     abi_long ret = -TARGET_EINVAL;
4007     abi_long err;
4008     cmd &= 0xff;
4009 
4010     switch( cmd ) {
4011 	case GETVAL:
4012 	case SETVAL:
4013             /* In 64 bit cross-endian situations, we will erroneously pick up
4014              * the wrong half of the union for the "val" element.  To rectify
4015              * this, the entire 8-byte structure is byteswapped, followed by
4016 	     * a swap of the 4 byte val field. In other cases, the data is
4017 	     * already in proper host byte order. */
4018 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4019 		target_su.buf = tswapal(target_su.buf);
4020 		arg.val = tswap32(target_su.val);
4021 	    } else {
4022 		arg.val = target_su.val;
4023 	    }
4024             ret = get_errno(semctl(semid, semnum, cmd, arg));
4025             break;
4026 	case GETALL:
4027 	case SETALL:
4028             err = target_to_host_semarray(semid, &array, target_su.array);
4029             if (err)
4030                 return err;
4031             arg.array = array;
4032             ret = get_errno(semctl(semid, semnum, cmd, arg));
4033             err = host_to_target_semarray(semid, target_su.array, &array);
4034             if (err)
4035                 return err;
4036             break;
4037 	case IPC_STAT:
4038 	case IPC_SET:
4039 	case SEM_STAT:
4040             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4041             if (err)
4042                 return err;
4043             arg.buf = &dsarg;
4044             ret = get_errno(semctl(semid, semnum, cmd, arg));
4045             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4046             if (err)
4047                 return err;
4048             break;
4049 	case IPC_INFO:
4050 	case SEM_INFO:
4051             arg.__buf = &seminfo;
4052             ret = get_errno(semctl(semid, semnum, cmd, arg));
4053             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4054             if (err)
4055                 return err;
4056             break;
4057 	case IPC_RMID:
4058 	case GETPID:
4059 	case GETNCNT:
4060 	case GETZCNT:
4061             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4062             break;
4063     }
4064 
4065     return ret;
4066 }
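
/*
 * Worked example for the GETVAL/SETVAL byte swap above (a sketch; the
 * guest/host combination is an assumption): a 64-bit big-endian guest on
 * a little-endian host passes val == 0x11223344.  The 8-byte semun
 * argument reaches us as target_su.buf == 0x1122334400000000, so
 * target_su.val would read back as 0 (the wrong half).  tswapal() on the
 * buf member restores the guest byte layout, 0x0000000044332211, and
 * tswap32() on the val member then recovers 0x11223344 for the host
 * semctl() call.
 */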
4067 
4068 struct target_sembuf {
4069     unsigned short sem_num;
4070     short sem_op;
4071     short sem_flg;
4072 };
4073 
4074 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4075                                              abi_ulong target_addr,
4076                                              unsigned nsops)
4077 {
4078     struct target_sembuf *target_sembuf;
4079     int i;
4080 
4081     target_sembuf = lock_user(VERIFY_READ, target_addr,
4082                               nsops*sizeof(struct target_sembuf), 1);
4083     if (!target_sembuf)
4084         return -TARGET_EFAULT;
4085 
4086     for(i=0; i<nsops; i++) {
4087         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4088         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4089         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4090     }
4091 
4092     unlock_user(target_sembuf, target_addr, 0);
4093 
4094     return 0;
4095 }
4096 
4097 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4098     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4099 
4100 /*
4101  * This macro is required to handle the s390 variant, which passes the
4102  * arguments in a different order than the default.
4103  */
4104 #ifdef __s390x__
4105 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4106   (__nsops), (__timeout), (__sops)
4107 #else
4108 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4109   (__nsops), 0, (__sops), (__timeout)
4110 #endif
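
/*
 * For reference, the default form above expands
 *     safe_ipc(IPCOP_semtimedop, semid, SEMTIMEDOP_IPC_ARGS(n, sops, ts))
 * to
 *     safe_ipc(IPCOP_semtimedop, semid, n, 0, sops, ts)
 * while the s390x form omits the dummy argument and passes the timeout
 * before the sops pointer; see do_semtimedop() below.
 */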
4111 
4112 static inline abi_long do_semtimedop(int semid,
4113                                      abi_long ptr,
4114                                      unsigned nsops,
4115                                      abi_long timeout, bool time64)
4116 {
4117     struct sembuf *sops;
4118     struct timespec ts, *pts = NULL;
4119     abi_long ret;
4120 
4121     if (timeout) {
4122         pts = &ts;
4123         if (time64) {
4124             if (target_to_host_timespec64(pts, timeout)) {
4125                 return -TARGET_EFAULT;
4126             }
4127         } else {
4128             if (target_to_host_timespec(pts, timeout)) {
4129                 return -TARGET_EFAULT;
4130             }
4131         }
4132     }
4133 
4134     if (nsops > TARGET_SEMOPM) {
4135         return -TARGET_E2BIG;
4136     }
4137 
4138     sops = g_new(struct sembuf, nsops);
4139 
4140     if (target_to_host_sembuf(sops, ptr, nsops)) {
4141         g_free(sops);
4142         return -TARGET_EFAULT;
4143     }
4144 
4145     ret = -TARGET_ENOSYS;
4146 #ifdef __NR_semtimedop
4147     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4148 #endif
4149 #ifdef __NR_ipc
4150     if (ret == -TARGET_ENOSYS) {
4151         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4152                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4153     }
4154 #endif
4155     g_free(sops);
4156     return ret;
4157 }
4158 #endif
4159 
4160 struct target_msqid_ds
4161 {
4162     struct target_ipc_perm msg_perm;
4163     abi_ulong msg_stime;
4164 #if TARGET_ABI_BITS == 32
4165     abi_ulong __unused1;
4166 #endif
4167     abi_ulong msg_rtime;
4168 #if TARGET_ABI_BITS == 32
4169     abi_ulong __unused2;
4170 #endif
4171     abi_ulong msg_ctime;
4172 #if TARGET_ABI_BITS == 32
4173     abi_ulong __unused3;
4174 #endif
4175     abi_ulong __msg_cbytes;
4176     abi_ulong msg_qnum;
4177     abi_ulong msg_qbytes;
4178     abi_ulong msg_lspid;
4179     abi_ulong msg_lrpid;
4180     abi_ulong __unused4;
4181     abi_ulong __unused5;
4182 };
4183 
4184 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4185                                                abi_ulong target_addr)
4186 {
4187     struct target_msqid_ds *target_md;
4188 
4189     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4190         return -TARGET_EFAULT;
4191     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4192         return -TARGET_EFAULT;
4193     host_md->msg_stime = tswapal(target_md->msg_stime);
4194     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4195     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4196     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4197     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4198     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4199     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4200     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4201     unlock_user_struct(target_md, target_addr, 0);
4202     return 0;
4203 }
4204 
4205 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4206                                                struct msqid_ds *host_md)
4207 {
4208     struct target_msqid_ds *target_md;
4209 
4210     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4211         return -TARGET_EFAULT;
4212     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4213         return -TARGET_EFAULT;
4214     target_md->msg_stime = tswapal(host_md->msg_stime);
4215     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4216     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4217     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4218     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4219     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4220     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4221     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4222     unlock_user_struct(target_md, target_addr, 1);
4223     return 0;
4224 }
4225 
4226 struct target_msginfo {
4227     int msgpool;
4228     int msgmap;
4229     int msgmax;
4230     int msgmnb;
4231     int msgmni;
4232     int msgssz;
4233     int msgtql;
4234     unsigned short int msgseg;
4235 };
4236 
4237 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4238                                               struct msginfo *host_msginfo)
4239 {
4240     struct target_msginfo *target_msginfo;
4241     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4242         return -TARGET_EFAULT;
4243     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4244     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4245     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4246     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4247     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4248     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4249     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4250     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4251     unlock_user_struct(target_msginfo, target_addr, 1);
4252     return 0;
4253 }
4254 
4255 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4256 {
4257     struct msqid_ds dsarg;
4258     struct msginfo msginfo;
4259     abi_long ret = -TARGET_EINVAL;
4260 
4261     cmd &= 0xff;
4262 
4263     switch (cmd) {
4264     case IPC_STAT:
4265     case IPC_SET:
4266     case MSG_STAT:
4267         if (target_to_host_msqid_ds(&dsarg,ptr))
4268             return -TARGET_EFAULT;
4269         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4270         if (host_to_target_msqid_ds(ptr,&dsarg))
4271             return -TARGET_EFAULT;
4272         break;
4273     case IPC_RMID:
4274         ret = get_errno(msgctl(msgid, cmd, NULL));
4275         break;
4276     case IPC_INFO:
4277     case MSG_INFO:
4278         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4279         if (host_to_target_msginfo(ptr, &msginfo))
4280             return -TARGET_EFAULT;
4281         break;
4282     }
4283 
4284     return ret;
4285 }
4286 
4287 struct target_msgbuf {
4288     abi_long mtype;
4289     char	mtext[1];
4290 };
4291 
4292 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4293                                  ssize_t msgsz, int msgflg)
4294 {
4295     struct target_msgbuf *target_mb;
4296     struct msgbuf *host_mb;
4297     abi_long ret = 0;
4298 
4299     if (msgsz < 0) {
4300         return -TARGET_EINVAL;
4301     }
4302 
4303     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4304         return -TARGET_EFAULT;
4305     host_mb = g_try_malloc(msgsz + sizeof(long));
4306     if (!host_mb) {
4307         unlock_user_struct(target_mb, msgp, 0);
4308         return -TARGET_ENOMEM;
4309     }
4310     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4311     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4312     ret = -TARGET_ENOSYS;
4313 #ifdef __NR_msgsnd
4314     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4315 #endif
4316 #ifdef __NR_ipc
4317     if (ret == -TARGET_ENOSYS) {
4318 #ifdef __s390x__
4319         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4320                                  host_mb));
4321 #else
4322         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4323                                  host_mb, 0));
4324 #endif
4325     }
4326 #endif
4327     g_free(host_mb);
4328     unlock_user_struct(target_mb, msgp, 0);
4329 
4330     return ret;
4331 }
4332 
4333 #ifdef __NR_ipc
4334 #if defined(__sparc__)
4335 /* On SPARC, msgrcv does not use the kludge on the final two arguments.  */
4336 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4337 #elif defined(__s390x__)
4338 /* The s390 sys_ipc variant has only five parameters.  */
4339 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4340     ((long int[]){(long int)__msgp, __msgtyp})
4341 #else
4342 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4343     ((long int[]){(long int)__msgp, __msgtyp}), 0
4344 #endif
4345 #endif
4346 
4347 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4348                                  ssize_t msgsz, abi_long msgtyp,
4349                                  int msgflg)
4350 {
4351     struct target_msgbuf *target_mb;
4352     char *target_mtext;
4353     struct msgbuf *host_mb;
4354     abi_long ret = 0;
4355 
4356     if (msgsz < 0) {
4357         return -TARGET_EINVAL;
4358     }
4359 
4360     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4361         return -TARGET_EFAULT;
4362 
4363     host_mb = g_try_malloc(msgsz + sizeof(long));
4364     if (!host_mb) {
4365         ret = -TARGET_ENOMEM;
4366         goto end;
4367     }
4368     ret = -TARGET_ENOSYS;
4369 #ifdef __NR_msgrcv
4370     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4371 #endif
4372 #ifdef __NR_ipc
4373     if (ret == -TARGET_ENOSYS) {
4374         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4375                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4376     }
4377 #endif
4378 
4379     if (ret > 0) {
4380         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4381         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4382         if (!target_mtext) {
4383             ret = -TARGET_EFAULT;
4384             goto end;
4385         }
4386         memcpy(target_mb->mtext, host_mb->mtext, ret);
4387         unlock_user(target_mtext, target_mtext_addr, ret);
4388     }
4389 
4390     target_mb->mtype = tswapal(host_mb->mtype);
4391 
4392 end:
4393     if (target_mb)
4394         unlock_user_struct(target_mb, msgp, 1);
4395     g_free(host_mb);
4396     return ret;
4397 }
4398 
4399 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4400                                                abi_ulong target_addr)
4401 {
4402     struct target_shmid_ds *target_sd;
4403 
4404     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4405         return -TARGET_EFAULT;
4406     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4407         return -TARGET_EFAULT;
4408     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4409     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4410     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4411     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4412     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4413     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4414     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4415     unlock_user_struct(target_sd, target_addr, 0);
4416     return 0;
4417 }
4418 
4419 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4420                                                struct shmid_ds *host_sd)
4421 {
4422     struct target_shmid_ds *target_sd;
4423 
4424     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4425         return -TARGET_EFAULT;
4426     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4427         return -TARGET_EFAULT;
4428     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4429     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4430     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4431     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4432     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4433     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4434     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4435     unlock_user_struct(target_sd, target_addr, 1);
4436     return 0;
4437 }
4438 
4439 struct  target_shminfo {
4440     abi_ulong shmmax;
4441     abi_ulong shmmin;
4442     abi_ulong shmmni;
4443     abi_ulong shmseg;
4444     abi_ulong shmall;
4445 };
4446 
4447 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4448                                               struct shminfo *host_shminfo)
4449 {
4450     struct target_shminfo *target_shminfo;
4451     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4452         return -TARGET_EFAULT;
4453     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4454     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4455     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4456     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4457     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4458     unlock_user_struct(target_shminfo, target_addr, 1);
4459     return 0;
4460 }
4461 
4462 struct target_shm_info {
4463     int used_ids;
4464     abi_ulong shm_tot;
4465     abi_ulong shm_rss;
4466     abi_ulong shm_swp;
4467     abi_ulong swap_attempts;
4468     abi_ulong swap_successes;
4469 };
4470 
4471 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4472                                                struct shm_info *host_shm_info)
4473 {
4474     struct target_shm_info *target_shm_info;
4475     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4476         return -TARGET_EFAULT;
4477     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4478     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4479     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4480     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4481     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4482     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4483     unlock_user_struct(target_shm_info, target_addr, 1);
4484     return 0;
4485 }
4486 
4487 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4488 {
4489     struct shmid_ds dsarg;
4490     struct shminfo shminfo;
4491     struct shm_info shm_info;
4492     abi_long ret = -TARGET_EINVAL;
4493 
4494     cmd &= 0xff;
4495 
4496     switch(cmd) {
4497     case IPC_STAT:
4498     case IPC_SET:
4499     case SHM_STAT:
4500         if (target_to_host_shmid_ds(&dsarg, buf))
4501             return -TARGET_EFAULT;
4502         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4503         if (host_to_target_shmid_ds(buf, &dsarg))
4504             return -TARGET_EFAULT;
4505         break;
4506     case IPC_INFO:
4507         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4508         if (host_to_target_shminfo(buf, &shminfo))
4509             return -TARGET_EFAULT;
4510         break;
4511     case SHM_INFO:
4512         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4513         if (host_to_target_shm_info(buf, &shm_info))
4514             return -TARGET_EFAULT;
4515         break;
4516     case IPC_RMID:
4517     case SHM_LOCK:
4518     case SHM_UNLOCK:
4519         ret = get_errno(shmctl(shmid, cmd, NULL));
4520         break;
4521     }
4522 
4523     return ret;
4524 }
4525 
4526 #ifndef TARGET_FORCE_SHMLBA
4527 /* For most architectures, SHMLBA is the same as the page size;
4528  * some architectures have larger values, in which case they should
4529  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4530  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4531  * and defining its own value for SHMLBA.
4532  *
4533  * The kernel also permits SHMLBA to be set by the architecture to a
4534  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4535  * this means that addresses are rounded to the large size if
4536  * SHM_RND is set but addresses not aligned to that size are not rejected
4537  * as long as they are at least page-aligned. Since the only architecture
4538  * which uses this is ia64, this code doesn't provide for that oddity.
4539  */
4540 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4541 {
4542     return TARGET_PAGE_SIZE;
4543 }
4544 #endif
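
/*
 * Example of the alignment rule applied in do_shmat() below (addresses
 * are made up): with shmlba == 0x1000, a request for shmaddr 0x40001234
 * is rounded down to 0x40001000 when SHM_RND is set, and rejected with
 * -TARGET_EINVAL otherwise.
 */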
4545 
4546 static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
4547                           abi_ulong shmaddr, int shmflg)
4548 {
4549     CPUState *cpu = env_cpu(cpu_env);
4550     abi_ulong raddr;
4551     void *host_raddr;
4552     struct shmid_ds shm_info;
4553     int i, ret;
4554     abi_ulong shmlba;
4555 
4556     /* shmat pointers are always untagged */
4557 
4558     /* find out the length of the shared memory segment */
4559     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4560     if (is_error(ret)) {
4561         /* can't get length, bail out */
4562         return ret;
4563     }
4564 
4565     shmlba = target_shmlba(cpu_env);
4566 
4567     if (shmaddr & (shmlba - 1)) {
4568         if (shmflg & SHM_RND) {
4569             shmaddr &= ~(shmlba - 1);
4570         } else {
4571             return -TARGET_EINVAL;
4572         }
4573     }
4574     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4575         return -TARGET_EINVAL;
4576     }
4577 
4578     mmap_lock();
4579 
4580     /*
4581      * We're mapping shared memory, so ensure we generate code for parallel
4582      * execution and flush old translations.  This will work up to the level
4583      * supported by the host -- anything that requires EXCP_ATOMIC will not
4584      * be atomic with respect to an external process.
4585      */
4586     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4587         cpu->tcg_cflags |= CF_PARALLEL;
4588         tb_flush(cpu);
4589     }
4590 
4591     if (shmaddr)
4592         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4593     else {
4594         abi_ulong mmap_start;
4595 
4596         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4597         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4598 
4599         if (mmap_start == -1) {
4600             errno = ENOMEM;
4601             host_raddr = (void *)-1;
4602         } else
4603             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4604                                shmflg | SHM_REMAP);
4605     }
4606 
4607     if (host_raddr == (void *)-1) {
4608         mmap_unlock();
4609         return get_errno((intptr_t)host_raddr);
4610     }
4611     raddr = h2g((uintptr_t)host_raddr);
4612 
4613     page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
4614                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4615                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4616 
4617     for (i = 0; i < N_SHM_REGIONS; i++) {
4618         if (!shm_regions[i].in_use) {
4619             shm_regions[i].in_use = true;
4620             shm_regions[i].start = raddr;
4621             shm_regions[i].size = shm_info.shm_segsz;
4622             break;
4623         }
4624     }
4625 
4626     mmap_unlock();
4627     return raddr;
4628 }
4629 
4630 static inline abi_long do_shmdt(abi_ulong shmaddr)
4631 {
4632     int i;
4633     abi_long rv;
4634 
4635     /* shmdt pointers are always untagged */
4636 
4637     mmap_lock();
4638 
4639     for (i = 0; i < N_SHM_REGIONS; ++i) {
4640         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4641             shm_regions[i].in_use = false;
4642             page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4643             break;
4644         }
4645     }
4646     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4647 
4648     mmap_unlock();
4649 
4650     return rv;
4651 }
4652 
4653 #ifdef TARGET_NR_ipc
4654 /* ??? This only works with linear mappings.  */
4655 /* do_ipc() must return target values and target errnos. */
4656 static abi_long do_ipc(CPUArchState *cpu_env,
4657                        unsigned int call, abi_long first,
4658                        abi_long second, abi_long third,
4659                        abi_long ptr, abi_long fifth)
4660 {
4661     int version;
4662     abi_long ret = 0;
4663 
4664     version = call >> 16;
4665     call &= 0xffff;
4666 
4667     switch (call) {
4668     case IPCOP_semop:
4669         ret = do_semtimedop(first, ptr, second, 0, false);
4670         break;
4671     case IPCOP_semtimedop:
4672     /*
4673      * The s390 sys_ipc variant has only five parameters instead of six
4674      * (as in the default variant); the only difference is the handling of
4675      * SEMTIMEDOP, where s390 uses the third parameter as a pointer to a
4676      * struct timespec while the generic variant uses the fifth parameter.
4677      */
4678 #if defined(TARGET_S390X)
4679         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4680 #else
4681         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4682 #endif
4683         break;
4684 
4685     case IPCOP_semget:
4686         ret = get_errno(semget(first, second, third));
4687         break;
4688 
4689     case IPCOP_semctl: {
4690         /* The semun argument to semctl is passed by value, so dereference the
4691          * ptr argument. */
4692         abi_ulong atptr;
4693         get_user_ual(atptr, ptr);
4694         ret = do_semctl(first, second, third, atptr);
4695         break;
4696     }
4697 
4698     case IPCOP_msgget:
4699         ret = get_errno(msgget(first, second));
4700         break;
4701 
4702     case IPCOP_msgsnd:
4703         ret = do_msgsnd(first, ptr, second, third);
4704         break;
4705 
4706     case IPCOP_msgctl:
4707         ret = do_msgctl(first, second, ptr);
4708         break;
4709 
4710     case IPCOP_msgrcv:
4711         switch (version) {
4712         case 0:
4713             {
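                /*
                 * The old (version 0) ABI passes a pointer to a small
                 * structure holding the message buffer pointer and the
                 * message type rather than passing them directly.
                 */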
4714                 struct target_ipc_kludge {
4715                     abi_long msgp;
4716                     abi_long msgtyp;
4717                 } *tmp;
4718 
4719                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4720                     ret = -TARGET_EFAULT;
4721                     break;
4722                 }
4723 
4724                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4725 
4726                 unlock_user_struct(tmp, ptr, 0);
4727                 break;
4728             }
4729         default:
4730             ret = do_msgrcv(first, ptr, second, fifth, third);
4731         }
4732         break;
4733 
4734     case IPCOP_shmat:
4735         switch (version) {
4736         default:
4737         {
4738             abi_ulong raddr;
4739             raddr = do_shmat(cpu_env, first, ptr, second);
4740             if (is_error(raddr))
4741                 return get_errno(raddr);
4742             if (put_user_ual(raddr, third))
4743                 return -TARGET_EFAULT;
4744             break;
4745         }
4746         case 1:
4747             ret = -TARGET_EINVAL;
4748             break;
4749         }
4750         break;
4751     case IPCOP_shmdt:
4752         ret = do_shmdt(ptr);
4753         break;
4754 
4755     case IPCOP_shmget:
4756         /* IPC_* flag values are the same on all linux platforms */
4757         ret = get_errno(shmget(first, second, third));
4758         break;
4759 
4760     /* IPC_* and SHM_* command values are the same on all linux platforms */
4761     case IPCOP_shmctl:
4762         ret = do_shmctl(first, second, ptr);
4763         break;
4764     default:
4765         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4766                       call, version);
4767         ret = -TARGET_ENOSYS;
4768         break;
4769     }
4770     return ret;
4771 }
4772 #endif
4773 
4774 /* kernel structure types definitions */
4775 
4776 #define STRUCT(name, ...) STRUCT_ ## name,
4777 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4778 enum {
4779 #include "syscall_types.h"
4780 STRUCT_MAX
4781 };
4782 #undef STRUCT
4783 #undef STRUCT_SPECIAL
4784 
4785 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4786 #define STRUCT_SPECIAL(name)
4787 #include "syscall_types.h"
4788 #undef STRUCT
4789 #undef STRUCT_SPECIAL
4790 
4791 #define MAX_STRUCT_SIZE 4096
4792 
4793 #ifdef CONFIG_FIEMAP
4794 /* So fiemap access checks don't overflow on 32 bit systems.
4795  * This is very slightly smaller than the limit imposed by
4796  * the underlying kernel.
4797  */
4798 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4799                             / sizeof(struct fiemap_extent))
4800 
4801 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4802                                        int fd, int cmd, abi_long arg)
4803 {
4804     /* The parameter for this ioctl is a struct fiemap followed
4805      * by an array of struct fiemap_extent whose size is set
4806      * in fiemap->fm_extent_count. The array is filled in by the
4807      * ioctl.
4808      */
4809     int target_size_in, target_size_out;
4810     struct fiemap *fm;
4811     const argtype *arg_type = ie->arg_type;
4812     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4813     void *argptr, *p;
4814     abi_long ret;
4815     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4816     uint32_t outbufsz;
4817     int free_fm = 0;
4818 
4819     assert(arg_type[0] == TYPE_PTR);
4820     assert(ie->access == IOC_RW);
4821     arg_type++;
4822     target_size_in = thunk_type_size(arg_type, 0);
4823     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4824     if (!argptr) {
4825         return -TARGET_EFAULT;
4826     }
4827     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4828     unlock_user(argptr, arg, 0);
4829     fm = (struct fiemap *)buf_temp;
4830     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4831         return -TARGET_EINVAL;
4832     }
4833 
4834     outbufsz = sizeof (*fm) +
4835         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4836 
4837     if (outbufsz > MAX_STRUCT_SIZE) {
4838         /* We can't fit all the extents into the fixed size buffer.
4839          * Allocate one that is large enough and use it instead.
4840          */
4841         fm = g_try_malloc(outbufsz);
4842         if (!fm) {
4843             return -TARGET_ENOMEM;
4844         }
4845         memcpy(fm, buf_temp, sizeof(struct fiemap));
4846         free_fm = 1;
4847     }
4848     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4849     if (!is_error(ret)) {
4850         target_size_out = target_size_in;
4851         /* An extent_count of 0 means we were only counting the extents
4852          * so there are no structs to copy
4853          */
4854         if (fm->fm_extent_count != 0) {
4855             target_size_out += fm->fm_mapped_extents * extent_size;
4856         }
4857         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4858         if (!argptr) {
4859             ret = -TARGET_EFAULT;
4860         } else {
4861             /* Convert the struct fiemap */
4862             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4863             if (fm->fm_extent_count != 0) {
4864                 p = argptr + target_size_in;
4865                 /* ...and then all the struct fiemap_extents */
4866                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4867                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4868                                   THUNK_TARGET);
4869                     p += extent_size;
4870                 }
4871             }
4872             unlock_user(argptr, arg, target_size_out);
4873         }
4874     }
4875     if (free_fm) {
4876         g_free(fm);
4877     }
4878     return ret;
4879 }
4880 #endif
4881 
4882 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4883                                 int fd, int cmd, abi_long arg)
4884 {
4885     const argtype *arg_type = ie->arg_type;
4886     int target_size;
4887     void *argptr;
4888     int ret;
4889     struct ifconf *host_ifconf;
4890     uint32_t outbufsz;
4891     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4892     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4893     int target_ifreq_size;
4894     int nb_ifreq;
4895     int free_buf = 0;
4896     int i;
4897     int target_ifc_len;
4898     abi_long target_ifc_buf;
4899     int host_ifc_len;
4900     char *host_ifc_buf;
4901 
4902     assert(arg_type[0] == TYPE_PTR);
4903     assert(ie->access == IOC_RW);
4904 
4905     arg_type++;
4906     target_size = thunk_type_size(arg_type, 0);
4907 
4908     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4909     if (!argptr)
4910         return -TARGET_EFAULT;
4911     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4912     unlock_user(argptr, arg, 0);
4913 
4914     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4915     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4916     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4917 
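    /*
     * The target and host struct ifreq may differ in size, so convert the
     * guest buffer length into a number of entries and size the host buffer
     * for that many host ifreqs.  A NULL ifc_buf means the caller only
     * wants the required buffer length reported back in ifc_len.
     */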
4918     if (target_ifc_buf != 0) {
4919         target_ifc_len = host_ifconf->ifc_len;
4920         nb_ifreq = target_ifc_len / target_ifreq_size;
4921         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4922 
4923         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4924         if (outbufsz > MAX_STRUCT_SIZE) {
4925             /*
4926              * We can't fit all the ifreq entries into the fixed-size buffer.
4927              * Allocate one that is large enough and use it instead.
4928              */
4929             host_ifconf = g_try_malloc(outbufsz);
4930             if (!host_ifconf) {
4931                 return -TARGET_ENOMEM;
4932             }
4933             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4934             free_buf = 1;
4935         }
4936         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4937 
4938         host_ifconf->ifc_len = host_ifc_len;
4939     } else {
4940         host_ifc_buf = NULL;
4941     }
4942     host_ifconf->ifc_buf = host_ifc_buf;
4943 
4944     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4945     if (!is_error(ret)) {
4946         /* convert host ifc_len to target ifc_len */
4947 
4948         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4949         target_ifc_len = nb_ifreq * target_ifreq_size;
4950         host_ifconf->ifc_len = target_ifc_len;
4951 
4952         /* restore target ifc_buf */
4953 
4954         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4955 
4956         /* copy struct ifconf to target user */
4957 
4958         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4959         if (!argptr)
4960             return -TARGET_EFAULT;
4961         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4962         unlock_user(argptr, arg, target_size);
4963 
4964         if (target_ifc_buf != 0) {
4965             /* copy ifreq[] to target user */
4966             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4967             for (i = 0; i < nb_ifreq ; i++) {
4968                 thunk_convert(argptr + i * target_ifreq_size,
4969                               host_ifc_buf + i * sizeof(struct ifreq),
4970                               ifreq_arg_type, THUNK_TARGET);
4971             }
4972             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4973         }
4974     }
4975 
4976     if (free_buf) {
4977         g_free(host_ifconf);
4978     }
4979 
4980     return ret;
4981 }
4982 
4983 #if defined(CONFIG_USBFS)
4984 #if HOST_LONG_BITS > 64
4985 #error USBDEVFS thunks do not support >64 bit hosts yet.
4986 #endif
4987 struct live_urb {
4988     uint64_t target_urb_adr;
4989     uint64_t target_buf_adr;
4990     char *target_buf_ptr;
4991     struct usbdevfs_urb host_urb;
4992 };
4993 
4994 static GHashTable *usbdevfs_urb_hashtable(void)
4995 {
4996     static GHashTable *urb_hashtable;
4997 
4998     if (!urb_hashtable) {
4999         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5000     }
5001     return urb_hashtable;
5002 }
5003 
5004 static void urb_hashtable_insert(struct live_urb *urb)
5005 {
5006     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5007     g_hash_table_insert(urb_hashtable, urb, urb);
5008 }
5009 
5010 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5011 {
5012     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5013     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5014 }
5015 
5016 static void urb_hashtable_remove(struct live_urb *urb)
5017 {
5018     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5019     g_hash_table_remove(urb_hashtable, urb);
5020 }
5021 
5022 static abi_long
5023 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5024                           int fd, int cmd, abi_long arg)
5025 {
5026     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5027     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5028     struct live_urb *lurb;
5029     void *argptr;
5030     uint64_t hurb;
5031     int target_size;
5032     uintptr_t target_urb_adr;
5033     abi_long ret;
5034 
5035     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5036 
5037     memset(buf_temp, 0, sizeof(uint64_t));
5038     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5039     if (is_error(ret)) {
5040         return ret;
5041     }
5042 
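    /*
     * The kernel hands back the pointer to the host usbdevfs_urb we
     * submitted; step back by offsetof(struct live_urb, host_urb) to
     * recover the enclosing live_urb and its guest-side metadata
     * (container_of-style pointer arithmetic).
     */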
5043     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5044     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5045     if (!lurb->target_urb_adr) {
5046         return -TARGET_EFAULT;
5047     }
5048     urb_hashtable_remove(lurb);
5049     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5050         lurb->host_urb.buffer_length);
5051     lurb->target_buf_ptr = NULL;
5052 
5053     /* restore the guest buffer pointer */
5054     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5055 
5056     /* update the guest urb struct */
5057     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5058     if (!argptr) {
5059         g_free(lurb);
5060         return -TARGET_EFAULT;
5061     }
5062     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5063     unlock_user(argptr, lurb->target_urb_adr, target_size);
5064 
5065     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5066     /* write back the urb handle */
5067     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5068     if (!argptr) {
5069         g_free(lurb);
5070         return -TARGET_EFAULT;
5071     }
5072 
5073     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5074     target_urb_adr = lurb->target_urb_adr;
5075     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5076     unlock_user(argptr, arg, target_size);
5077 
5078     g_free(lurb);
5079     return ret;
5080 }
5081 
5082 static abi_long
5083 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5084                              uint8_t *buf_temp __attribute__((unused)),
5085                              int fd, int cmd, abi_long arg)
5086 {
5087     struct live_urb *lurb;
5088 
5089     /* map target address back to host URB with metadata. */
5090     lurb = urb_hashtable_lookup(arg);
5091     if (!lurb) {
5092         return -TARGET_EFAULT;
5093     }
5094     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5095 }
5096 
5097 static abi_long
5098 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5099                             int fd, int cmd, abi_long arg)
5100 {
5101     const argtype *arg_type = ie->arg_type;
5102     int target_size;
5103     abi_long ret;
5104     void *argptr;
5105     int rw_dir;
5106     struct live_urb *lurb;
5107 
5108     /*
5109      * Each submitted URB needs to map to a unique ID for the
5110      * kernel, and that unique ID needs to be a pointer to
5111      * host memory.  Hence, we allocate a live_urb for each URB.
5112      * Isochronous transfers have a variable-length struct.
5113      */
5114     arg_type++;
5115     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5116 
5117     /* construct host copy of urb and metadata */
5118     lurb = g_try_new0(struct live_urb, 1);
5119     if (!lurb) {
5120         return -TARGET_ENOMEM;
5121     }
5122 
5123     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5124     if (!argptr) {
5125         g_free(lurb);
5126         return -TARGET_EFAULT;
5127     }
5128     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5129     unlock_user(argptr, arg, 0);
5130 
5131     lurb->target_urb_adr = arg;
5132     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5133 
5134     /* buffer space used depends on endpoint type so lock the entire buffer */
5135     /* control type urbs should check the buffer contents for true direction */
5136     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5137     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5138         lurb->host_urb.buffer_length, 1);
5139     if (lurb->target_buf_ptr == NULL) {
5140         g_free(lurb);
5141         return -TARGET_EFAULT;
5142     }
5143 
5144     /* update buffer pointer in host copy */
5145     lurb->host_urb.buffer = lurb->target_buf_ptr;
5146 
5147     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5148     if (is_error(ret)) {
5149         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5150         g_free(lurb);
5151     } else {
5152         urb_hashtable_insert(lurb);
5153     }
5154 
5155     return ret;
5156 }
5157 #endif /* CONFIG_USBFS */
5158 
5159 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5160                             int cmd, abi_long arg)
5161 {
5162     void *argptr;
5163     struct dm_ioctl *host_dm;
5164     abi_long guest_data;
5165     uint32_t guest_data_size;
5166     int target_size;
5167     const argtype *arg_type = ie->arg_type;
5168     abi_long ret;
5169     void *big_buf = NULL;
5170     char *host_data;
5171 
5172     arg_type++;
5173     target_size = thunk_type_size(arg_type, 0);
5174     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5175     if (!argptr) {
5176         ret = -TARGET_EFAULT;
5177         goto out;
5178     }
5179     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5180     unlock_user(argptr, arg, 0);
5181 
5182     /* buf_temp is too small, so fetch things into a bigger buffer */
5183     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5184     memcpy(big_buf, buf_temp, target_size);
5185     buf_temp = big_buf;
5186     host_dm = big_buf;
5187 
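    /*
     * A dm_ioctl header is followed by a variable-sized payload; both the
     * guest and the host view locate it data_start bytes from the start of
     * the structure, so derive matching guest and host payload pointers.
     */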
5188     guest_data = arg + host_dm->data_start;
5189     if ((guest_data - arg) < 0) {
5190         ret = -TARGET_EINVAL;
5191         goto out;
5192     }
5193     guest_data_size = host_dm->data_size - host_dm->data_start;
5194     host_data = (char*)host_dm + host_dm->data_start;
5195 
5196     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5197     if (!argptr) {
5198         ret = -TARGET_EFAULT;
5199         goto out;
5200     }
5201 
5202     switch (ie->host_cmd) {
5203     case DM_REMOVE_ALL:
5204     case DM_LIST_DEVICES:
5205     case DM_DEV_CREATE:
5206     case DM_DEV_REMOVE:
5207     case DM_DEV_SUSPEND:
5208     case DM_DEV_STATUS:
5209     case DM_DEV_WAIT:
5210     case DM_TABLE_STATUS:
5211     case DM_TABLE_CLEAR:
5212     case DM_TABLE_DEPS:
5213     case DM_LIST_VERSIONS:
5214         /* no input data */
5215         break;
5216     case DM_DEV_RENAME:
5217     case DM_DEV_SET_GEOMETRY:
5218         /* data contains only strings */
5219         memcpy(host_data, argptr, guest_data_size);
5220         break;
5221     case DM_TARGET_MSG:
5222         memcpy(host_data, argptr, guest_data_size);
5223         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5224         break;
5225     case DM_TABLE_LOAD:
5226     {
5227         void *gspec = argptr;
5228         void *cur_data = host_data;
5229         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5230         int spec_size = thunk_type_size(arg_type, 0);
5231         int i;
5232 
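        /*
         * Each dm_target_spec is followed by its parameter string, and the
         * 'next' field gives the offset to the following spec.  Convert the
         * fixed part with the thunk, copy the string separately, and
         * recompute 'next' for the host layout.
         */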
5233         for (i = 0; i < host_dm->target_count; i++) {
5234             struct dm_target_spec *spec = cur_data;
5235             uint32_t next;
5236             int slen;
5237 
5238             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5239             slen = strlen((char*)gspec + spec_size) + 1;
5240             next = spec->next;
5241             spec->next = sizeof(*spec) + slen;
5242             strcpy((char*)&spec[1], gspec + spec_size);
5243             gspec += next;
5244             cur_data += spec->next;
5245         }
5246         break;
5247     }
5248     default:
5249         ret = -TARGET_EINVAL;
5250         unlock_user(argptr, guest_data, 0);
5251         goto out;
5252     }
5253     unlock_user(argptr, guest_data, 0);
5254 
5255     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5256     if (!is_error(ret)) {
5257         guest_data = arg + host_dm->data_start;
5258         guest_data_size = host_dm->data_size - host_dm->data_start;
5259         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5260         switch (ie->host_cmd) {
5261         case DM_REMOVE_ALL:
5262         case DM_DEV_CREATE:
5263         case DM_DEV_REMOVE:
5264         case DM_DEV_RENAME:
5265         case DM_DEV_SUSPEND:
5266         case DM_DEV_STATUS:
5267         case DM_TABLE_LOAD:
5268         case DM_TABLE_CLEAR:
5269         case DM_TARGET_MSG:
5270         case DM_DEV_SET_GEOMETRY:
5271             /* no return data */
5272             break;
5273         case DM_LIST_DEVICES:
5274         {
5275             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5276             uint32_t remaining_data = guest_data_size;
5277             void *cur_data = argptr;
5278             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5279             int nl_size = 12; /* can't use thunk_size due to alignment */
5280 
5281             while (1) {
5282                 uint32_t next = nl->next;
5283                 if (next) {
5284                     nl->next = nl_size + (strlen(nl->name) + 1);
5285                 }
5286                 if (remaining_data < nl->next) {
5287                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5288                     break;
5289                 }
5290                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5291                 strcpy(cur_data + nl_size, nl->name);
5292                 cur_data += nl->next;
5293                 remaining_data -= nl->next;
5294                 if (!next) {
5295                     break;
5296                 }
5297                 nl = (void*)nl + next;
5298             }
5299             break;
5300         }
5301         case DM_DEV_WAIT:
5302         case DM_TABLE_STATUS:
5303         {
5304             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5305             void *cur_data = argptr;
5306             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5307             int spec_size = thunk_type_size(arg_type, 0);
5308             int i;
5309 
5310             for (i = 0; i < host_dm->target_count; i++) {
5311                 uint32_t next = spec->next;
5312                 int slen = strlen((char*)&spec[1]) + 1;
5313                 spec->next = (cur_data - argptr) + spec_size + slen;
5314                 if (guest_data_size < spec->next) {
5315                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5316                     break;
5317                 }
5318                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5319                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5320                 cur_data = argptr + spec->next;
5321                 spec = (void*)host_dm + host_dm->data_start + next;
5322             }
5323             break;
5324         }
5325         case DM_TABLE_DEPS:
5326         {
5327             void *hdata = (void*)host_dm + host_dm->data_start;
5328             int count = *(uint32_t*)hdata;
5329             uint64_t *hdev = hdata + 8;
5330             uint64_t *gdev = argptr + 8;
5331             int i;
5332 
5333             *(uint32_t*)argptr = tswap32(count);
5334             for (i = 0; i < count; i++) {
5335                 *gdev = tswap64(*hdev);
5336                 gdev++;
5337                 hdev++;
5338             }
5339             break;
5340         }
5341         case DM_LIST_VERSIONS:
5342         {
5343             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5344             uint32_t remaining_data = guest_data_size;
5345             void *cur_data = argptr;
5346             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5347             int vers_size = thunk_type_size(arg_type, 0);
5348 
5349             while (1) {
5350                 uint32_t next = vers->next;
5351                 if (next) {
5352                     vers->next = vers_size + (strlen(vers->name) + 1);
5353                 }
5354                 if (remaining_data < vers->next) {
5355                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5356                     break;
5357                 }
5358                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5359                 strcpy(cur_data + vers_size, vers->name);
5360                 cur_data += vers->next;
5361                 remaining_data -= vers->next;
5362                 if (!next) {
5363                     break;
5364                 }
5365                 vers = (void*)vers + next;
5366             }
5367             break;
5368         }
5369         default:
5370             unlock_user(argptr, guest_data, 0);
5371             ret = -TARGET_EINVAL;
5372             goto out;
5373         }
5374         unlock_user(argptr, guest_data, guest_data_size);
5375 
5376         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5377         if (!argptr) {
5378             ret = -TARGET_EFAULT;
5379             goto out;
5380         }
5381         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5382         unlock_user(argptr, arg, target_size);
5383     }
5384 out:
5385     g_free(big_buf);
5386     return ret;
5387 }
5388 
5389 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5390                                int cmd, abi_long arg)
5391 {
5392     void *argptr;
5393     int target_size;
5394     const argtype *arg_type = ie->arg_type;
5395     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5396     abi_long ret;
5397 
5398     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5399     struct blkpg_partition host_part;
5400 
5401     /* Read and convert blkpg */
5402     arg_type++;
5403     target_size = thunk_type_size(arg_type, 0);
5404     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5405     if (!argptr) {
5406         ret = -TARGET_EFAULT;
5407         goto out;
5408     }
5409     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5410     unlock_user(argptr, arg, 0);
5411 
5412     switch (host_blkpg->op) {
5413     case BLKPG_ADD_PARTITION:
5414     case BLKPG_DEL_PARTITION:
5415         /* payload is struct blkpg_partition */
5416         break;
5417     default:
5418         /* Unknown opcode */
5419         ret = -TARGET_EINVAL;
5420         goto out;
5421     }
5422 
5423     /* Read and convert blkpg->data */
5424     arg = (abi_long)(uintptr_t)host_blkpg->data;
5425     target_size = thunk_type_size(part_arg_type, 0);
5426     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5427     if (!argptr) {
5428         ret = -TARGET_EFAULT;
5429         goto out;
5430     }
5431     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5432     unlock_user(argptr, arg, 0);
5433 
5434     /* Swizzle the data pointer to our local copy and call! */
5435     host_blkpg->data = &host_part;
5436     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5437 
5438 out:
5439     return ret;
5440 }
5441 
5442 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5443                                 int fd, int cmd, abi_long arg)
5444 {
5445     const argtype *arg_type = ie->arg_type;
5446     const StructEntry *se;
5447     const argtype *field_types;
5448     const int *dst_offsets, *src_offsets;
5449     int target_size;
5450     void *argptr;
5451     abi_ulong *target_rt_dev_ptr = NULL;
5452     unsigned long *host_rt_dev_ptr = NULL;
5453     abi_long ret;
5454     int i;
5455 
5456     assert(ie->access == IOC_W);
5457     assert(*arg_type == TYPE_PTR);
5458     arg_type++;
5459     assert(*arg_type == TYPE_STRUCT);
5460     target_size = thunk_type_size(arg_type, 0);
5461     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5462     if (!argptr) {
5463         return -TARGET_EFAULT;
5464     }
5465     arg_type++;
5466     assert(*arg_type == (int)STRUCT_rtentry);
5467     se = struct_entries + *arg_type++;
5468     assert(se->convert[0] == NULL);
5469     /* convert struct here to be able to catch rt_dev string */
5470     field_types = se->field_types;
5471     dst_offsets = se->field_offsets[THUNK_HOST];
5472     src_offsets = se->field_offsets[THUNK_TARGET];
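    /*
     * Convert the rtentry field by field: rt_dev is a pointer to a guest
     * string, so rather than a plain thunk conversion we lock the guest
     * string into host memory and store the host pointer in the converted
     * structure; it is unlocked again after the ioctl.
     */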
5473     for (i = 0; i < se->nb_fields; i++) {
5474         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5475             assert(*field_types == TYPE_PTRVOID);
5476             target_rt_dev_ptr = argptr + src_offsets[i];
5477             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5478             if (*target_rt_dev_ptr != 0) {
5479                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5480                                                   tswapal(*target_rt_dev_ptr));
5481                 if (!*host_rt_dev_ptr) {
5482                     unlock_user(argptr, arg, 0);
5483                     return -TARGET_EFAULT;
5484                 }
5485             } else {
5486                 *host_rt_dev_ptr = 0;
5487             }
5488             field_types++;
5489             continue;
5490         }
5491         field_types = thunk_convert(buf_temp + dst_offsets[i],
5492                                     argptr + src_offsets[i],
5493                                     field_types, THUNK_HOST);
5494     }
5495     unlock_user(argptr, arg, 0);
5496 
5497     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5498 
5499     assert(host_rt_dev_ptr != NULL);
5500     assert(target_rt_dev_ptr != NULL);
5501     if (*host_rt_dev_ptr != 0) {
5502         unlock_user((void *)*host_rt_dev_ptr,
5503                     *target_rt_dev_ptr, 0);
5504     }
5505     return ret;
5506 }
5507 
5508 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5509                                      int fd, int cmd, abi_long arg)
5510 {
5511     int sig = target_to_host_signal(arg);
5512     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5513 }
5514 
5515 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5516                                     int fd, int cmd, abi_long arg)
5517 {
5518     struct timeval tv;
5519     abi_long ret;
5520 
5521     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5522     if (is_error(ret)) {
5523         return ret;
5524     }
5525 
5526     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5527         if (copy_to_user_timeval(arg, &tv)) {
5528             return -TARGET_EFAULT;
5529         }
5530     } else {
5531         if (copy_to_user_timeval64(arg, &tv)) {
5532             return -TARGET_EFAULT;
5533         }
5534     }
5535 
5536     return ret;
5537 }
5538 
5539 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5540                                       int fd, int cmd, abi_long arg)
5541 {
5542     struct timespec ts;
5543     abi_long ret;
5544 
5545     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5546     if (is_error(ret)) {
5547         return ret;
5548     }
5549 
5550     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5551         if (host_to_target_timespec(arg, &ts)) {
5552             return -TARGET_EFAULT;
5553         }
5554     } else {
5555         if (host_to_target_timespec64(arg, &ts)) {
5556             return -TARGET_EFAULT;
5557         }
5558     }
5559 
5560     return ret;
5561 }
5562 
5563 #ifdef TIOCGPTPEER
5564 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5565                                      int fd, int cmd, abi_long arg)
5566 {
5567     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5568     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5569 }
5570 #endif
5571 
5572 #ifdef HAVE_DRM_H
5573 
5574 static void unlock_drm_version(struct drm_version *host_ver,
5575                                struct target_drm_version *target_ver,
5576                                bool copy)
5577 {
5578     unlock_user(host_ver->name, target_ver->name,
5579                                 copy ? host_ver->name_len : 0);
5580     unlock_user(host_ver->date, target_ver->date,
5581                                 copy ? host_ver->date_len : 0);
5582     unlock_user(host_ver->desc, target_ver->desc,
5583                                 copy ? host_ver->desc_len : 0);
5584 }
5585 
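/*
 * Lock the guest-supplied name/date/desc buffers into host memory so that
 * DRM_IOCTL_VERSION can write into them directly; unlock_drm_version()
 * releases them, copying the data back to the guest only on success.
 */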
5586 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5587                                           struct target_drm_version *target_ver)
5588 {
5589     memset(host_ver, 0, sizeof(*host_ver));
5590 
5591     __get_user(host_ver->name_len, &target_ver->name_len);
5592     if (host_ver->name_len) {
5593         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5594                                    target_ver->name_len, 0);
5595         if (!host_ver->name) {
5596             return -EFAULT;
5597         }
5598     }
5599 
5600     __get_user(host_ver->date_len, &target_ver->date_len);
5601     if (host_ver->date_len) {
5602         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5603                                    target_ver->date_len, 0);
5604         if (!host_ver->date) {
5605             goto err;
5606         }
5607     }
5608 
5609     __get_user(host_ver->desc_len, &target_ver->desc_len);
5610     if (host_ver->desc_len) {
5611         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5612                                    target_ver->desc_len, 0);
5613         if (!host_ver->desc) {
5614             goto err;
5615         }
5616     }
5617 
5618     return 0;
5619 err:
5620     unlock_drm_version(host_ver, target_ver, false);
5621     return -EFAULT;
5622 }
5623 
5624 static inline void host_to_target_drmversion(
5625                                           struct target_drm_version *target_ver,
5626                                           struct drm_version *host_ver)
5627 {
5628     __put_user(host_ver->version_major, &target_ver->version_major);
5629     __put_user(host_ver->version_minor, &target_ver->version_minor);
5630     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5631     __put_user(host_ver->name_len, &target_ver->name_len);
5632     __put_user(host_ver->date_len, &target_ver->date_len);
5633     __put_user(host_ver->desc_len, &target_ver->desc_len);
5634     unlock_drm_version(host_ver, target_ver, true);
5635 }
5636 
5637 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5638                              int fd, int cmd, abi_long arg)
5639 {
5640     struct drm_version *ver;
5641     struct target_drm_version *target_ver;
5642     abi_long ret;
5643 
5644     switch (ie->host_cmd) {
5645     case DRM_IOCTL_VERSION:
5646         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5647             return -TARGET_EFAULT;
5648         }
5649         ver = (struct drm_version *)buf_temp;
5650         ret = target_to_host_drmversion(ver, target_ver);
5651         if (!is_error(ret)) {
5652             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5653             if (is_error(ret)) {
5654                 unlock_drm_version(ver, target_ver, false);
5655             } else {
5656                 host_to_target_drmversion(target_ver, ver);
5657             }
5658         }
5659         unlock_user_struct(target_ver, arg, 0);
5660         return ret;
5661     }
5662     return -TARGET_ENOSYS;
5663 }
5664 
5665 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5666                                            struct drm_i915_getparam *gparam,
5667                                            int fd, abi_long arg)
5668 {
5669     abi_long ret;
5670     int value;
5671     struct target_drm_i915_getparam *target_gparam;
5672 
5673     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5674         return -TARGET_EFAULT;
5675     }
5676 
5677     __get_user(gparam->param, &target_gparam->param);
5678     gparam->value = &value;
5679     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5680     put_user_s32(value, target_gparam->value);
5681 
5682     unlock_user_struct(target_gparam, arg, 0);
5683     return ret;
5684 }
5685 
5686 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5687                                   int fd, int cmd, abi_long arg)
5688 {
5689     switch (ie->host_cmd) {
5690     case DRM_IOCTL_I915_GETPARAM:
5691         return do_ioctl_drm_i915_getparam(ie,
5692                                           (struct drm_i915_getparam *)buf_temp,
5693                                           fd, arg);
5694     default:
5695         return -TARGET_ENOSYS;
5696     }
5697 }
5698 
5699 #endif
5700 
5701 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5702                                         int fd, int cmd, abi_long arg)
5703 {
5704     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5705     struct tun_filter *target_filter;
5706     char *target_addr;
5707 
5708     assert(ie->access == IOC_W);
5709 
5710     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5711     if (!target_filter) {
5712         return -TARGET_EFAULT;
5713     }
5714     filter->flags = tswap16(target_filter->flags);
5715     filter->count = tswap16(target_filter->count);
5716     unlock_user(target_filter, arg, 0);
5717 
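    /*
     * struct tun_filter is followed by a variable-length array of MAC
     * addresses (count * ETH_ALEN bytes); copy those separately, bailing
     * out if they would overflow the fixed-size conversion buffer.
     */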
5718     if (filter->count) {
5719         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5720             MAX_STRUCT_SIZE) {
5721             return -TARGET_EFAULT;
5722         }
5723 
5724         target_addr = lock_user(VERIFY_READ,
5725                                 arg + offsetof(struct tun_filter, addr),
5726                                 filter->count * ETH_ALEN, 1);
5727         if (!target_addr) {
5728             return -TARGET_EFAULT;
5729         }
5730         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5731         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5732     }
5733 
5734     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5735 }
5736 
5737 IOCTLEntry ioctl_entries[] = {
5738 #define IOCTL(cmd, access, ...) \
5739     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5740 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5741     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5742 #define IOCTL_IGNORE(cmd) \
5743     { TARGET_ ## cmd, 0, #cmd },
5744 #include "ioctls.h"
5745     { 0, 0, },
5746 };
5747 
5748 /* ??? Implement proper locking for ioctls.  */
5749 /* do_ioctl() must return target values and target errnos. */
5750 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5751 {
5752     const IOCTLEntry *ie;
5753     const argtype *arg_type;
5754     abi_long ret;
5755     uint8_t buf_temp[MAX_STRUCT_SIZE];
5756     int target_size;
5757     void *argptr;
5758 
5759     ie = ioctl_entries;
5760     for (;;) {
5761         if (ie->target_cmd == 0) {
5762             qemu_log_mask(
5763                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5764             return -TARGET_ENOTTY;
5765         }
5766         if (ie->target_cmd == cmd)
5767             break;
5768         ie++;
5769     }
5770     arg_type = ie->arg_type;
5771     if (ie->do_ioctl) {
5772         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5773     } else if (!ie->host_cmd) {
5774         /* Some architectures define BSD ioctls in their headers
5775            that are not implemented in Linux.  */
5776         return -TARGET_ENOTTY;
5777     }
5778 
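    /*
     * Generic path: convert the argument according to the table-described
     * type.  Scalar arguments are passed through unchanged, while pointer
     * arguments are thunk-converted via buf_temp before and/or after the
     * host ioctl, depending on the declared access direction.
     */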
5779     switch (arg_type[0]) {
5780     case TYPE_NULL:
5781         /* no argument */
5782         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5783         break;
5784     case TYPE_PTRVOID:
5785     case TYPE_INT:
5786     case TYPE_LONG:
5787     case TYPE_ULONG:
5788         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5789         break;
5790     case TYPE_PTR:
5791         arg_type++;
5792         target_size = thunk_type_size(arg_type, 0);
5793         switch (ie->access) {
5794         case IOC_R:
5795             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5796             if (!is_error(ret)) {
5797                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5798                 if (!argptr)
5799                     return -TARGET_EFAULT;
5800                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5801                 unlock_user(argptr, arg, target_size);
5802             }
5803             break;
5804         case IOC_W:
5805             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5806             if (!argptr)
5807                 return -TARGET_EFAULT;
5808             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5809             unlock_user(argptr, arg, 0);
5810             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5811             break;
5812         default:
5813         case IOC_RW:
5814             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5815             if (!argptr)
5816                 return -TARGET_EFAULT;
5817             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5818             unlock_user(argptr, arg, 0);
5819             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5820             if (!is_error(ret)) {
5821                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5822                 if (!argptr)
5823                     return -TARGET_EFAULT;
5824                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5825                 unlock_user(argptr, arg, target_size);
5826             }
5827             break;
5828         }
5829         break;
5830     default:
5831         qemu_log_mask(LOG_UNIMP,
5832                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5833                       (long)cmd, arg_type[0]);
5834         ret = -TARGET_ENOTTY;
5835         break;
5836     }
5837     return ret;
5838 }
5839 
5840 static const bitmask_transtbl iflag_tbl[] = {
5841         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5842         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5843         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5844         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5845         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5846         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5847         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5848         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5849         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5850         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5851         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5852         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5853         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5854         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5855         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5856         { 0, 0, 0, 0 }
5857 };
5858 
5859 static const bitmask_transtbl oflag_tbl[] = {
5860 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5861 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5862 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5863 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5864 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5865 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5866 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5867 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5868 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5869 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5870 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5871 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5872 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5873 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5874 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5875 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5876 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5877 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5878 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5879 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5880 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5881 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5882 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5883 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5884 	{ 0, 0, 0, 0 }
5885 };
5886 
5887 static const bitmask_transtbl cflag_tbl[] = {
5888 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5889 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5890 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5891 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5892 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5893 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5894 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5895 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5896 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5897 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5898 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5899 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5900 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5901 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5902 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5903 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5904 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5905 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5906 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5907 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5908 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5909 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5910 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5911 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5912 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5913 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5914 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5915 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5916 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5917 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5918 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5919 	{ 0, 0, 0, 0 }
5920 };
5921 
5922 static const bitmask_transtbl lflag_tbl[] = {
5923   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5924   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5925   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5926   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5927   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5928   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5929   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5930   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5931   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5932   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5933   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5934   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5935   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5936   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5937   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5938   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5939   { 0, 0, 0, 0 }
5940 };
5941 
5942 static void target_to_host_termios (void *dst, const void *src)
5943 {
5944     struct host_termios *host = dst;
5945     const struct target_termios *target = src;
5946 
5947     host->c_iflag =
5948         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5949     host->c_oflag =
5950         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5951     host->c_cflag =
5952         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5953     host->c_lflag =
5954         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5955     host->c_line = target->c_line;
5956 
5957     memset(host->c_cc, 0, sizeof(host->c_cc));
5958     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5959     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5960     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5961     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5962     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5963     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5964     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5965     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5966     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5967     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5968     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5969     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5970     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5971     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5972     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5973     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5974     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5975 }
5976 
5977 static void host_to_target_termios (void *dst, const void *src)
5978 {
5979     struct target_termios *target = dst;
5980     const struct host_termios *host = src;
5981 
5982     target->c_iflag =
5983         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5984     target->c_oflag =
5985         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5986     target->c_cflag =
5987         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5988     target->c_lflag =
5989         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5990     target->c_line = host->c_line;
5991 
5992     memset(target->c_cc, 0, sizeof(target->c_cc));
5993     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5994     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5995     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5996     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5997     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5998     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5999     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6000     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6001     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6002     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6003     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6004     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6005     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6006     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6007     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6008     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6009     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6010 }
6011 
6012 static const StructEntry struct_termios_def = {
6013     .convert = { host_to_target_termios, target_to_host_termios },
6014     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6015     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6016     .print = print_termios,
6017 };
6018 
6019 /* If the host does not provide these bits, they may be safely discarded. */
6020 #ifndef MAP_SYNC
6021 #define MAP_SYNC 0
6022 #endif
6023 #ifndef MAP_UNINITIALIZED
6024 #define MAP_UNINITIALIZED 0
6025 #endif
6026 
6027 static const bitmask_transtbl mmap_flags_tbl[] = {
6028     { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
6029     { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
6030     { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
6031       MAP_TYPE, MAP_SHARED_VALIDATE },
6032     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6033     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6034       MAP_ANONYMOUS, MAP_ANONYMOUS },
6035     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6036       MAP_GROWSDOWN, MAP_GROWSDOWN },
6037     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6038       MAP_DENYWRITE, MAP_DENYWRITE },
6039     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6040       MAP_EXECUTABLE, MAP_EXECUTABLE },
6041     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6042     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6043       MAP_NORESERVE, MAP_NORESERVE },
6044     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6045     /* MAP_STACK had been ignored by the kernel for quite some time.
6046        Recognize it for the target insofar as we do not want to pass
6047        it through to the host.  */
6048     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6049     { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
6050     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
6051     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
6052     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
6053       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
6054     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
6055       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
6056     { 0, 0, 0, 0 }
6057 };
6058 
6059 /*
6060  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6061  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6062  */
6063 #if defined(TARGET_I386)
6064 
6065 /* NOTE: there is really only one LDT shared by all the threads */
6066 static uint8_t *ldt_table;
6067 
6068 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6069 {
6070     int size;
6071     void *p;
6072 
6073     if (!ldt_table)
6074         return 0;
6075     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6076     if (size > bytecount)
6077         size = bytecount;
6078     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6079     if (!p)
6080         return -TARGET_EFAULT;
6081     /* ??? Should this be byteswapped?  */
6082     memcpy(p, ldt_table, size);
6083     unlock_user(p, ptr, size);
6084     return size;
6085 }
6086 
6087 /* XXX: add locking support */
6088 static abi_long write_ldt(CPUX86State *env,
6089                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6090 {
6091     struct target_modify_ldt_ldt_s ldt_info;
6092     struct target_modify_ldt_ldt_s *target_ldt_info;
6093     int seg_32bit, contents, read_exec_only, limit_in_pages;
6094     int seg_not_present, useable, lm;
6095     uint32_t *lp, entry_1, entry_2;
6096 
6097     if (bytecount != sizeof(ldt_info))
6098         return -TARGET_EINVAL;
6099     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6100         return -TARGET_EFAULT;
6101     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6102     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6103     ldt_info.limit = tswap32(target_ldt_info->limit);
6104     ldt_info.flags = tswap32(target_ldt_info->flags);
6105     unlock_user_struct(target_ldt_info, ptr, 0);
6106 
6107     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6108         return -TARGET_EINVAL;
6109     seg_32bit = ldt_info.flags & 1;
6110     contents = (ldt_info.flags >> 1) & 3;
6111     read_exec_only = (ldt_info.flags >> 3) & 1;
6112     limit_in_pages = (ldt_info.flags >> 4) & 1;
6113     seg_not_present = (ldt_info.flags >> 5) & 1;
6114     useable = (ldt_info.flags >> 6) & 1;
6115 #ifdef TARGET_ABI32
6116     lm = 0;
6117 #else
6118     lm = (ldt_info.flags >> 7) & 1;
6119 #endif
6120     if (contents == 3) {
6121         if (oldmode)
6122             return -TARGET_EINVAL;
6123         if (seg_not_present == 0)
6124             return -TARGET_EINVAL;
6125     }
6126     /* allocate the LDT */
6127     if (!ldt_table) {
6128         env->ldt.base = target_mmap(0,
6129                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6130                                     PROT_READ|PROT_WRITE,
6131                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6132         if (env->ldt.base == -1)
6133             return -TARGET_ENOMEM;
6134         memset(g2h_untagged(env->ldt.base), 0,
6135                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6136         env->ldt.limit = 0xffff;
6137         ldt_table = g2h_untagged(env->ldt.base);
6138     }
6139 
6140     /* NOTE: same code as Linux kernel */
6141     /* Allow LDTs to be cleared by the user. */
6142     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6143         if (oldmode ||
6144             (contents == 0		&&
6145              read_exec_only == 1	&&
6146              seg_32bit == 0		&&
6147              limit_in_pages == 0	&&
6148              seg_not_present == 1	&&
6149              useable == 0 )) {
6150             entry_1 = 0;
6151             entry_2 = 0;
6152             goto install;
6153         }
6154     }
6155 
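    /*
     * Pack the modify_ldt information into the two 32-bit words of an x86
     * segment descriptor: entry_1 carries base[15:0] and limit[15:0], while
     * entry_2 carries base[31:16], limit[19:16] and the access/flag bits.
     */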
6156     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6157         (ldt_info.limit & 0x0ffff);
6158     entry_2 = (ldt_info.base_addr & 0xff000000) |
6159         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6160         (ldt_info.limit & 0xf0000) |
6161         ((read_exec_only ^ 1) << 9) |
6162         (contents << 10) |
6163         ((seg_not_present ^ 1) << 15) |
6164         (seg_32bit << 22) |
6165         (limit_in_pages << 23) |
6166         (lm << 21) |
6167         0x7000;
6168     if (!oldmode)
6169         entry_2 |= (useable << 20);
6170 
6171     /* Install the new entry ...  */
6172 install:
6173     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6174     lp[0] = tswap32(entry_1);
6175     lp[1] = tswap32(entry_2);
6176     return 0;
6177 }
6178 
6179 /* specific and weird i386 syscalls */
6180 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6181                               unsigned long bytecount)
6182 {
6183     abi_long ret;
6184 
6185     switch (func) {
6186     case 0:
6187         ret = read_ldt(ptr, bytecount);
6188         break;
6189     case 1:
6190         ret = write_ldt(env, ptr, bytecount, 1);
6191         break;
6192     case 0x11:
6193         ret = write_ldt(env, ptr, bytecount, 0);
6194         break;
6195     default:
6196         ret = -TARGET_ENOSYS;
6197         break;
6198     }
6199     return ret;
6200 }
6201 
6202 #if defined(TARGET_ABI32)
6203 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6204 {
6205     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6206     struct target_modify_ldt_ldt_s ldt_info;
6207     struct target_modify_ldt_ldt_s *target_ldt_info;
6208     int seg_32bit, contents, read_exec_only, limit_in_pages;
6209     int seg_not_present, useable, lm;
6210     uint32_t *lp, entry_1, entry_2;
6211     int i;
6212 
6213     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6214     if (!target_ldt_info)
6215         return -TARGET_EFAULT;
6216     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6217     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6218     ldt_info.limit = tswap32(target_ldt_info->limit);
6219     ldt_info.flags = tswap32(target_ldt_info->flags);
6220     if (ldt_info.entry_number == -1) {
6221         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6222             if (gdt_table[i] == 0) {
6223                 ldt_info.entry_number = i;
6224                 target_ldt_info->entry_number = tswap32(i);
6225                 break;
6226             }
6227         }
6228     }
6229     unlock_user_struct(target_ldt_info, ptr, 1);
6230 
6231     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6232         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6233         return -TARGET_EINVAL;
6234     seg_32bit = ldt_info.flags & 1;
6235     contents = (ldt_info.flags >> 1) & 3;
6236     read_exec_only = (ldt_info.flags >> 3) & 1;
6237     limit_in_pages = (ldt_info.flags >> 4) & 1;
6238     seg_not_present = (ldt_info.flags >> 5) & 1;
6239     useable = (ldt_info.flags >> 6) & 1;
6240 #ifdef TARGET_ABI32
6241     lm = 0;
6242 #else
6243     lm = (ldt_info.flags >> 7) & 1;
6244 #endif
6245 
6246     if (contents == 3) {
6247         if (seg_not_present == 0)
6248             return -TARGET_EINVAL;
6249     }
6250 
6251     /* NOTE: same code as Linux kernel */
6252     /* Allow LDTs to be cleared by the user. */
6253     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6254         if ((contents == 0             &&
6255              read_exec_only == 1       &&
6256              seg_32bit == 0            &&
6257              limit_in_pages == 0       &&
6258              seg_not_present == 1      &&
6259              useable == 0 )) {
6260             entry_1 = 0;
6261             entry_2 = 0;
6262             goto install;
6263         }
6264     }
6265 
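    /* Pack the descriptor words the same way as write_ldt() above, except
     * that the "useable" bit is always honoured here. */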
6266     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6267         (ldt_info.limit & 0x0ffff);
6268     entry_2 = (ldt_info.base_addr & 0xff000000) |
6269         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6270         (ldt_info.limit & 0xf0000) |
6271         ((read_exec_only ^ 1) << 9) |
6272         (contents << 10) |
6273         ((seg_not_present ^ 1) << 15) |
6274         (seg_32bit << 22) |
6275         (limit_in_pages << 23) |
6276         (useable << 20) |
6277         (lm << 21) |
6278         0x7000;
6279 
6280     /* Install the new entry ...  */
6281 install:
6282     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6283     lp[0] = tswap32(entry_1);
6284     lp[1] = tswap32(entry_2);
6285     return 0;
6286 }
6287 
6288 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6289 {
6290     struct target_modify_ldt_ldt_s *target_ldt_info;
6291     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6292     uint32_t base_addr, limit, flags;
6293     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6294     int seg_not_present, useable, lm;
6295     uint32_t *lp, entry_1, entry_2;
6296 
6297     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6298     if (!target_ldt_info)
6299         return -TARGET_EFAULT;
6300     idx = tswap32(target_ldt_info->entry_number);
6301     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6302         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6303         unlock_user_struct(target_ldt_info, ptr, 1);
6304         return -TARGET_EINVAL;
6305     }
6306     lp = (uint32_t *)(gdt_table + idx);
6307     entry_1 = tswap32(lp[0]);
6308     entry_2 = tswap32(lp[1]);
6309 
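    /* Unpack the descriptor words back into the modify_ldt flags layout;
     * this is the inverse of the packing in do_set_thread_area(). */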
6310     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6311     contents = (entry_2 >> 10) & 3;
6312     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6313     seg_32bit = (entry_2 >> 22) & 1;
6314     limit_in_pages = (entry_2 >> 23) & 1;
6315     useable = (entry_2 >> 20) & 1;
6316 #ifdef TARGET_ABI32
6317     lm = 0;
6318 #else
6319     lm = (entry_2 >> 21) & 1;
6320 #endif
6321     flags = (seg_32bit << 0) | (contents << 1) |
6322         (read_exec_only << 3) | (limit_in_pages << 4) |
6323         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6324     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6325     base_addr = (entry_1 >> 16) |
6326         (entry_2 & 0xff000000) |
6327         ((entry_2 & 0xff) << 16);
6328     target_ldt_info->base_addr = tswapal(base_addr);
6329     target_ldt_info->limit = tswap32(limit);
6330     target_ldt_info->flags = tswap32(flags);
6331     unlock_user_struct(target_ldt_info, ptr, 1);
6332     return 0;
6333 }
6334 
6335 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6336 {
6337     return -TARGET_ENOSYS;
6338 }
6339 #else
6340 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6341 {
6342     abi_long ret = 0;
6343     abi_ulong val;
6344     int idx;
6345 
6346     switch(code) {
6347     case TARGET_ARCH_SET_GS:
6348     case TARGET_ARCH_SET_FS:
6349         if (code == TARGET_ARCH_SET_GS)
6350             idx = R_GS;
6351         else
6352             idx = R_FS;
6353         cpu_x86_load_seg(env, idx, 0);
6354         env->segs[idx].base = addr;
6355         break;
6356     case TARGET_ARCH_GET_GS:
6357     case TARGET_ARCH_GET_FS:
6358         if (code == TARGET_ARCH_GET_GS)
6359             idx = R_GS;
6360         else
6361             idx = R_FS;
6362         val = env->segs[idx].base;
6363         if (put_user(val, addr, abi_ulong))
6364             ret = -TARGET_EFAULT;
6365         break;
6366     default:
6367         ret = -TARGET_EINVAL;
6368         break;
6369     }
6370     return ret;
6371 }
6372 #endif /* defined(TARGET_ABI32) */
6373 #endif /* defined(TARGET_I386) */
6374 
6375 /*
6376  * These constants are generic.  Supply any that are missing from the host.
6377  */
6378 #ifndef PR_SET_NAME
6379 # define PR_SET_NAME    15
6380 # define PR_GET_NAME    16
6381 #endif
6382 #ifndef PR_SET_FP_MODE
6383 # define PR_SET_FP_MODE 45
6384 # define PR_GET_FP_MODE 46
6385 # define PR_FP_MODE_FR   (1 << 0)
6386 # define PR_FP_MODE_FRE  (1 << 1)
6387 #endif
6388 #ifndef PR_SVE_SET_VL
6389 # define PR_SVE_SET_VL  50
6390 # define PR_SVE_GET_VL  51
6391 # define PR_SVE_VL_LEN_MASK  0xffff
6392 # define PR_SVE_VL_INHERIT   (1 << 17)
6393 #endif
6394 #ifndef PR_PAC_RESET_KEYS
6395 # define PR_PAC_RESET_KEYS  54
6396 # define PR_PAC_APIAKEY   (1 << 0)
6397 # define PR_PAC_APIBKEY   (1 << 1)
6398 # define PR_PAC_APDAKEY   (1 << 2)
6399 # define PR_PAC_APDBKEY   (1 << 3)
6400 # define PR_PAC_APGAKEY   (1 << 4)
6401 #endif
6402 #ifndef PR_SET_TAGGED_ADDR_CTRL
6403 # define PR_SET_TAGGED_ADDR_CTRL 55
6404 # define PR_GET_TAGGED_ADDR_CTRL 56
6405 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6406 #endif
6407 #ifndef PR_MTE_TCF_SHIFT
6408 # define PR_MTE_TCF_SHIFT       1
6409 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6410 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6411 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6412 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6413 # define PR_MTE_TAG_SHIFT       3
6414 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6415 #endif
6416 #ifndef PR_SET_IO_FLUSHER
6417 # define PR_SET_IO_FLUSHER 57
6418 # define PR_GET_IO_FLUSHER 58
6419 #endif
6420 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6421 # define PR_SET_SYSCALL_USER_DISPATCH 59
6422 #endif
6423 #ifndef PR_SME_SET_VL
6424 # define PR_SME_SET_VL  63
6425 # define PR_SME_GET_VL  64
6426 # define PR_SME_VL_LEN_MASK  0xffff
6427 # define PR_SME_VL_INHERIT   (1 << 17)
6428 #endif
6429 
6430 #include "target_prctl.h"
6431 
6432 static abi_long do_prctl_inval0(CPUArchState *env)
6433 {
6434     return -TARGET_EINVAL;
6435 }
6436 
6437 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6438 {
6439     return -TARGET_EINVAL;
6440 }
6441 
6442 #ifndef do_prctl_get_fp_mode
6443 #define do_prctl_get_fp_mode do_prctl_inval0
6444 #endif
6445 #ifndef do_prctl_set_fp_mode
6446 #define do_prctl_set_fp_mode do_prctl_inval1
6447 #endif
6448 #ifndef do_prctl_sve_get_vl
6449 #define do_prctl_sve_get_vl do_prctl_inval0
6450 #endif
6451 #ifndef do_prctl_sve_set_vl
6452 #define do_prctl_sve_set_vl do_prctl_inval1
6453 #endif
6454 #ifndef do_prctl_reset_keys
6455 #define do_prctl_reset_keys do_prctl_inval1
6456 #endif
6457 #ifndef do_prctl_set_tagged_addr_ctrl
6458 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6459 #endif
6460 #ifndef do_prctl_get_tagged_addr_ctrl
6461 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6462 #endif
6463 #ifndef do_prctl_get_unalign
6464 #define do_prctl_get_unalign do_prctl_inval1
6465 #endif
6466 #ifndef do_prctl_set_unalign
6467 #define do_prctl_set_unalign do_prctl_inval1
6468 #endif
6469 #ifndef do_prctl_sme_get_vl
6470 #define do_prctl_sme_get_vl do_prctl_inval0
6471 #endif
6472 #ifndef do_prctl_sme_set_vl
6473 #define do_prctl_sme_set_vl do_prctl_inval1
6474 #endif
6475 
6476 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6477                          abi_long arg3, abi_long arg4, abi_long arg5)
6478 {
6479     abi_long ret;
6480 
6481     switch (option) {
6482     case PR_GET_PDEATHSIG:
6483         {
6484             int deathsig;
6485             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6486                                   arg3, arg4, arg5));
6487             if (!is_error(ret) &&
6488                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6489                 return -TARGET_EFAULT;
6490             }
6491             return ret;
6492         }
6493     case PR_SET_PDEATHSIG:
6494         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6495                                arg3, arg4, arg5));
6496     case PR_GET_NAME:
6497         {
6498             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6499             if (!name) {
6500                 return -TARGET_EFAULT;
6501             }
6502             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6503                                   arg3, arg4, arg5));
6504             unlock_user(name, arg2, 16);
6505             return ret;
6506         }
6507     case PR_SET_NAME:
6508         {
6509             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6510             if (!name) {
6511                 return -TARGET_EFAULT;
6512             }
6513             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6514                                   arg3, arg4, arg5));
6515             unlock_user(name, arg2, 0);
6516             return ret;
6517         }
6518     case PR_GET_FP_MODE:
6519         return do_prctl_get_fp_mode(env);
6520     case PR_SET_FP_MODE:
6521         return do_prctl_set_fp_mode(env, arg2);
6522     case PR_SVE_GET_VL:
6523         return do_prctl_sve_get_vl(env);
6524     case PR_SVE_SET_VL:
6525         return do_prctl_sve_set_vl(env, arg2);
6526     case PR_SME_GET_VL:
6527         return do_prctl_sme_get_vl(env);
6528     case PR_SME_SET_VL:
6529         return do_prctl_sme_set_vl(env, arg2);
6530     case PR_PAC_RESET_KEYS:
6531         if (arg3 || arg4 || arg5) {
6532             return -TARGET_EINVAL;
6533         }
6534         return do_prctl_reset_keys(env, arg2);
6535     case PR_SET_TAGGED_ADDR_CTRL:
6536         if (arg3 || arg4 || arg5) {
6537             return -TARGET_EINVAL;
6538         }
6539         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6540     case PR_GET_TAGGED_ADDR_CTRL:
6541         if (arg2 || arg3 || arg4 || arg5) {
6542             return -TARGET_EINVAL;
6543         }
6544         return do_prctl_get_tagged_addr_ctrl(env);
6545 
6546     case PR_GET_UNALIGN:
6547         return do_prctl_get_unalign(env, arg2);
6548     case PR_SET_UNALIGN:
6549         return do_prctl_set_unalign(env, arg2);
6550 
6551     case PR_CAP_AMBIENT:
6552     case PR_CAPBSET_READ:
6553     case PR_CAPBSET_DROP:
6554     case PR_GET_DUMPABLE:
6555     case PR_SET_DUMPABLE:
6556     case PR_GET_KEEPCAPS:
6557     case PR_SET_KEEPCAPS:
6558     case PR_GET_SECUREBITS:
6559     case PR_SET_SECUREBITS:
6560     case PR_GET_TIMING:
6561     case PR_SET_TIMING:
6562     case PR_GET_TIMERSLACK:
6563     case PR_SET_TIMERSLACK:
6564     case PR_MCE_KILL:
6565     case PR_MCE_KILL_GET:
6566     case PR_GET_NO_NEW_PRIVS:
6567     case PR_SET_NO_NEW_PRIVS:
6568     case PR_GET_IO_FLUSHER:
6569     case PR_SET_IO_FLUSHER:
6570         /* These prctl options take no pointer arguments; pass them through. */
6571         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6572 
6573     case PR_GET_CHILD_SUBREAPER:
6574     case PR_SET_CHILD_SUBREAPER:
6575     case PR_GET_SPECULATION_CTRL:
6576     case PR_SET_SPECULATION_CTRL:
6577     case PR_GET_TID_ADDRESS:
6578         /* TODO */
6579         return -TARGET_EINVAL;
6580 
6581     case PR_GET_FPEXC:
6582     case PR_SET_FPEXC:
6583         /* Was used for SPE on PowerPC. */
6584         return -TARGET_EINVAL;
6585 
6586     case PR_GET_ENDIAN:
6587     case PR_SET_ENDIAN:
6588     case PR_GET_FPEMU:
6589     case PR_SET_FPEMU:
6590     case PR_SET_MM:
6591     case PR_GET_SECCOMP:
6592     case PR_SET_SECCOMP:
6593     case PR_SET_SYSCALL_USER_DISPATCH:
6594     case PR_GET_THP_DISABLE:
6595     case PR_SET_THP_DISABLE:
6596     case PR_GET_TSC:
6597     case PR_SET_TSC:
6598         /* Refuse these so the target cannot disable things we rely on. */
6599         return -TARGET_EINVAL;
6600 
6601     default:
6602         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6603                       option);
6604         return -TARGET_EINVAL;
6605     }
6606 }
6607 
6608 #define NEW_STACK_SIZE 0x40000
6609 
6610 
6611 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6612 typedef struct {
6613     CPUArchState *env;
6614     pthread_mutex_t mutex;
6615     pthread_cond_t cond;
6616     pthread_t thread;
6617     uint32_t tid;
6618     abi_ulong child_tidptr;
6619     abi_ulong parent_tidptr;
6620     sigset_t sigmask;
6621 } new_thread_info;
6622 
6623 static void *clone_func(void *arg)
6624 {
6625     new_thread_info *info = arg;
6626     CPUArchState *env;
6627     CPUState *cpu;
6628     TaskState *ts;
6629 
6630     rcu_register_thread();
6631     tcg_register_thread();
6632     env = info->env;
6633     cpu = env_cpu(env);
6634     thread_cpu = cpu;
6635     ts = (TaskState *)cpu->opaque;
6636     info->tid = sys_gettid();
6637     task_settid(ts);
6638     if (info->child_tidptr)
6639         put_user_u32(info->tid, info->child_tidptr);
6640     if (info->parent_tidptr)
6641         put_user_u32(info->tid, info->parent_tidptr);
6642     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6643     /* Enable signals.  */
6644     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6645     /* Signal to the parent that we're ready.  */
6646     pthread_mutex_lock(&info->mutex);
6647     pthread_cond_broadcast(&info->cond);
6648     pthread_mutex_unlock(&info->mutex);
6649     /* Wait until the parent has finished initializing the tls state.  */
6650     pthread_mutex_lock(&clone_lock);
6651     pthread_mutex_unlock(&clone_lock);
6652     cpu_loop(env);
6653     /* never exits */
6654     return NULL;
6655 }
6656 
6657 /* do_fork() must return host values and target errnos (unlike most
6658    do_*() functions). */
6659 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6660                    abi_ulong parent_tidptr, target_ulong newtls,
6661                    abi_ulong child_tidptr)
6662 {
6663     CPUState *cpu = env_cpu(env);
6664     int ret;
6665     TaskState *ts;
6666     CPUState *new_cpu;
6667     CPUArchState *new_env;
6668     sigset_t sigmask;
6669 
6670     flags &= ~CLONE_IGNORED_FLAGS;
6671 
6672     /* Emulate vfork() with fork() */
6673     if (flags & CLONE_VFORK)
6674         flags &= ~(CLONE_VFORK | CLONE_VM);
6675 
6676     if (flags & CLONE_VM) {
6677         TaskState *parent_ts = (TaskState *)cpu->opaque;
6678         new_thread_info info;
6679         pthread_attr_t attr;
6680 
6681         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6682             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6683             return -TARGET_EINVAL;
6684         }
6685 
6686         ts = g_new0(TaskState, 1);
6687         init_task_state(ts);
6688 
6689         /* Grab a mutex so that thread setup appears atomic.  */
6690         pthread_mutex_lock(&clone_lock);
6691 
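        /*
         * The new thread announces its TID via info.cond/info.mutex and then
         * blocks on clone_lock until we release it below, i.e. until the
         * parent has finished setting up the child's CPU and TLS state.
         */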
6692         /*
6693          * If this is our first additional thread, we need to ensure we
6694          * generate code for parallel execution and flush old translations.
6695          * Do this now so that the copy gets CF_PARALLEL too.
6696          */
6697         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6698             cpu->tcg_cflags |= CF_PARALLEL;
6699             tb_flush(cpu);
6700         }
6701 
6702         /* Create a new CPU instance. */
6703         new_env = cpu_copy(env);
6704         /* Init regs that differ from the parent.  */
6705         cpu_clone_regs_child(new_env, newsp, flags);
6706         cpu_clone_regs_parent(env, flags);
6707         new_cpu = env_cpu(new_env);
6708         new_cpu->opaque = ts;
6709         ts->bprm = parent_ts->bprm;
6710         ts->info = parent_ts->info;
6711         ts->signal_mask = parent_ts->signal_mask;
6712 
6713         if (flags & CLONE_CHILD_CLEARTID) {
6714             ts->child_tidptr = child_tidptr;
6715         }
6716 
6717         if (flags & CLONE_SETTLS) {
6718             cpu_set_tls (new_env, newtls);
6719         }
6720 
6721         memset(&info, 0, sizeof(info));
6722         pthread_mutex_init(&info.mutex, NULL);
6723         pthread_mutex_lock(&info.mutex);
6724         pthread_cond_init(&info.cond, NULL);
6725         info.env = new_env;
6726         if (flags & CLONE_CHILD_SETTID) {
6727             info.child_tidptr = child_tidptr;
6728         }
6729         if (flags & CLONE_PARENT_SETTID) {
6730             info.parent_tidptr = parent_tidptr;
6731         }
6732 
6733         ret = pthread_attr_init(&attr);
6734         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6735         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6736         /* It is not safe to deliver signals until the child has finished
6737            initializing, so temporarily block all signals.  */
6738         sigfillset(&sigmask);
6739         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6740         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6741 
6742         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6743         /* TODO: Free new CPU state if thread creation failed.  */
6744 
6745         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6746         pthread_attr_destroy(&attr);
6747         if (ret == 0) {
6748             /* Wait for the child to initialize.  */
6749             pthread_cond_wait(&info.cond, &info.mutex);
6750             ret = info.tid;
6751         } else {
6752             ret = -1;
6753         }
6754         pthread_mutex_unlock(&info.mutex);
6755         pthread_cond_destroy(&info.cond);
6756         pthread_mutex_destroy(&info.mutex);
6757         pthread_mutex_unlock(&clone_lock);
6758     } else {
6759         /* If CLONE_VM is not set, we treat this as a fork. */
6760         if (flags & CLONE_INVALID_FORK_FLAGS) {
6761             return -TARGET_EINVAL;
6762         }
6763 
6764         /* We can't support custom termination signals */
6765         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6766             return -TARGET_EINVAL;
6767         }
6768 
6769 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6770         if (flags & CLONE_PIDFD) {
6771             return -TARGET_EINVAL;
6772         }
6773 #endif
6774 
6775         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6776         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6777             return -TARGET_EINVAL;
6778         }
6779 
6780         if (block_signals()) {
6781             return -QEMU_ERESTARTSYS;
6782         }
6783 
6784         fork_start();
6785         ret = fork();
6786         if (ret == 0) {
6787             /* Child Process.  */
6788             cpu_clone_regs_child(env, newsp, flags);
6789             fork_end(1);
6790             /* There is a race condition here.  The parent process could
6791                theoretically read the TID in the child process before the
6792                child tid is set.  Avoiding it would require either ptrace
6793                (not implemented) or having *_tidptr point at a shared memory
6794                mapping.  We can't repeat the spinlock hack used above because
6795                the child process gets its own copy of the lock.  */
6796             if (flags & CLONE_CHILD_SETTID)
6797                 put_user_u32(sys_gettid(), child_tidptr);
6798             if (flags & CLONE_PARENT_SETTID)
6799                 put_user_u32(sys_gettid(), parent_tidptr);
6800             ts = (TaskState *)cpu->opaque;
6801             if (flags & CLONE_SETTLS)
6802                 cpu_set_tls (env, newtls);
6803             if (flags & CLONE_CHILD_CLEARTID)
6804                 ts->child_tidptr = child_tidptr;
6805         } else {
6806             cpu_clone_regs_parent(env, flags);
6807             if (flags & CLONE_PIDFD) {
6808                 int pid_fd = 0;
6809 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6810                 int pid_child = ret;
6811                 pid_fd = pidfd_open(pid_child, 0);
6812                 if (pid_fd >= 0) {
6813                     fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6814                                            | FD_CLOEXEC);
6815                 } else {
6816                     pid_fd = 0;
6817                 }
6818 #endif
6819                 put_user_u32(pid_fd, parent_tidptr);
6820             }
6821             fork_end(0);
6822         }
6823         g_assert(!cpu_in_exclusive_context(cpu));
6824     }
6825     return ret;
6826 }
6827 
6828 /* warning: doesn't handle Linux-specific flags... */
6829 static int target_to_host_fcntl_cmd(int cmd)
6830 {
6831     int ret;
6832 
6833     switch(cmd) {
6834     case TARGET_F_DUPFD:
6835     case TARGET_F_GETFD:
6836     case TARGET_F_SETFD:
6837     case TARGET_F_GETFL:
6838     case TARGET_F_SETFL:
6839     case TARGET_F_OFD_GETLK:
6840     case TARGET_F_OFD_SETLK:
6841     case TARGET_F_OFD_SETLKW:
6842         ret = cmd;
6843         break;
6844     case TARGET_F_GETLK:
6845         ret = F_GETLK64;
6846         break;
6847     case TARGET_F_SETLK:
6848         ret = F_SETLK64;
6849         break;
6850     case TARGET_F_SETLKW:
6851         ret = F_SETLKW64;
6852         break;
6853     case TARGET_F_GETOWN:
6854         ret = F_GETOWN;
6855         break;
6856     case TARGET_F_SETOWN:
6857         ret = F_SETOWN;
6858         break;
6859     case TARGET_F_GETSIG:
6860         ret = F_GETSIG;
6861         break;
6862     case TARGET_F_SETSIG:
6863         ret = F_SETSIG;
6864         break;
6865 #if TARGET_ABI_BITS == 32
6866     case TARGET_F_GETLK64:
6867         ret = F_GETLK64;
6868         break;
6869     case TARGET_F_SETLK64:
6870         ret = F_SETLK64;
6871         break;
6872     case TARGET_F_SETLKW64:
6873         ret = F_SETLKW64;
6874         break;
6875 #endif
6876     case TARGET_F_SETLEASE:
6877         ret = F_SETLEASE;
6878         break;
6879     case TARGET_F_GETLEASE:
6880         ret = F_GETLEASE;
6881         break;
6882 #ifdef F_DUPFD_CLOEXEC
6883     case TARGET_F_DUPFD_CLOEXEC:
6884         ret = F_DUPFD_CLOEXEC;
6885         break;
6886 #endif
6887     case TARGET_F_NOTIFY:
6888         ret = F_NOTIFY;
6889         break;
6890 #ifdef F_GETOWN_EX
6891     case TARGET_F_GETOWN_EX:
6892         ret = F_GETOWN_EX;
6893         break;
6894 #endif
6895 #ifdef F_SETOWN_EX
6896     case TARGET_F_SETOWN_EX:
6897         ret = F_SETOWN_EX;
6898         break;
6899 #endif
6900 #ifdef F_SETPIPE_SZ
6901     case TARGET_F_SETPIPE_SZ:
6902         ret = F_SETPIPE_SZ;
6903         break;
6904     case TARGET_F_GETPIPE_SZ:
6905         ret = F_GETPIPE_SZ;
6906         break;
6907 #endif
6908 #ifdef F_ADD_SEALS
6909     case TARGET_F_ADD_SEALS:
6910         ret = F_ADD_SEALS;
6911         break;
6912     case TARGET_F_GET_SEALS:
6913         ret = F_GET_SEALS;
6914         break;
6915 #endif
6916     default:
6917         ret = -TARGET_EINVAL;
6918         break;
6919     }
6920 
6921 #if defined(__powerpc64__)
6922     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6923      * 14, which the kernel does not support. The glibc fcntl wrapper
6924      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6925      * the syscall directly, adjust to what the kernel supports.
6926      */
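    /* The adjustment below relies on F_GETLK64, F_SETLK64 and F_SETLKW64
     * being consecutive, so 12/13/14 map to 5/6/7 respectively. */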
6927     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6928         ret -= F_GETLK64 - 5;
6929     }
6930 #endif
6931 
6932     return ret;
6933 }
6934 
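/*
 * FLOCK_TRANSTBL expands to a switch whose cases are generated by the
 * TRANSTBL_CONVERT macro, so one table yields both the target-to-host and
 * host-to-target lock type conversions below.
 */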
6935 #define FLOCK_TRANSTBL \
6936     switch (type) { \
6937     TRANSTBL_CONVERT(F_RDLCK); \
6938     TRANSTBL_CONVERT(F_WRLCK); \
6939     TRANSTBL_CONVERT(F_UNLCK); \
6940     }
6941 
6942 static int target_to_host_flock(int type)
6943 {
6944 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6945     FLOCK_TRANSTBL
6946 #undef  TRANSTBL_CONVERT
6947     return -TARGET_EINVAL;
6948 }
6949 
6950 static int host_to_target_flock(int type)
6951 {
6952 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6953     FLOCK_TRANSTBL
6954 #undef  TRANSTBL_CONVERT
6955     /* If we don't know how to convert the value coming from the host,
6956      * copy it to the target field as-is.
6957      */
6958     return type;
6959 }
6960 
6961 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6962                                             abi_ulong target_flock_addr)
6963 {
6964     struct target_flock *target_fl;
6965     int l_type;
6966 
6967     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6968         return -TARGET_EFAULT;
6969     }
6970 
6971     __get_user(l_type, &target_fl->l_type);
6972     l_type = target_to_host_flock(l_type);
6973     if (l_type < 0) {
6974         return l_type;
6975     }
6976     fl->l_type = l_type;
6977     __get_user(fl->l_whence, &target_fl->l_whence);
6978     __get_user(fl->l_start, &target_fl->l_start);
6979     __get_user(fl->l_len, &target_fl->l_len);
6980     __get_user(fl->l_pid, &target_fl->l_pid);
6981     unlock_user_struct(target_fl, target_flock_addr, 0);
6982     return 0;
6983 }
6984 
6985 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6986                                           const struct flock64 *fl)
6987 {
6988     struct target_flock *target_fl;
6989     short l_type;
6990 
6991     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6992         return -TARGET_EFAULT;
6993     }
6994 
6995     l_type = host_to_target_flock(fl->l_type);
6996     __put_user(l_type, &target_fl->l_type);
6997     __put_user(fl->l_whence, &target_fl->l_whence);
6998     __put_user(fl->l_start, &target_fl->l_start);
6999     __put_user(fl->l_len, &target_fl->l_len);
7000     __put_user(fl->l_pid, &target_fl->l_pid);
7001     unlock_user_struct(target_fl, target_flock_addr, 1);
7002     return 0;
7003 }
7004 
7005 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
7006 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
7007 
7008 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
7009 struct target_oabi_flock64 {
7010     abi_short l_type;
7011     abi_short l_whence;
7012     abi_llong l_start;
7013     abi_llong l_len;
7014     abi_int   l_pid;
7015 } QEMU_PACKED;
7016 
7017 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
7018                                                    abi_ulong target_flock_addr)
7019 {
7020     struct target_oabi_flock64 *target_fl;
7021     int l_type;
7022 
7023     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7024         return -TARGET_EFAULT;
7025     }
7026 
7027     __get_user(l_type, &target_fl->l_type);
7028     l_type = target_to_host_flock(l_type);
7029     if (l_type < 0) {
7030         return l_type;
7031     }
7032     fl->l_type = l_type;
7033     __get_user(fl->l_whence, &target_fl->l_whence);
7034     __get_user(fl->l_start, &target_fl->l_start);
7035     __get_user(fl->l_len, &target_fl->l_len);
7036     __get_user(fl->l_pid, &target_fl->l_pid);
7037     unlock_user_struct(target_fl, target_flock_addr, 0);
7038     return 0;
7039 }
7040 
7041 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7042                                                  const struct flock64 *fl)
7043 {
7044     struct target_oabi_flock64 *target_fl;
7045     short l_type;
7046 
7047     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7048         return -TARGET_EFAULT;
7049     }
7050 
7051     l_type = host_to_target_flock(fl->l_type);
7052     __put_user(l_type, &target_fl->l_type);
7053     __put_user(fl->l_whence, &target_fl->l_whence);
7054     __put_user(fl->l_start, &target_fl->l_start);
7055     __put_user(fl->l_len, &target_fl->l_len);
7056     __put_user(fl->l_pid, &target_fl->l_pid);
7057     unlock_user_struct(target_fl, target_flock_addr, 1);
7058     return 0;
7059 }
7060 #endif
7061 
7062 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7063                                               abi_ulong target_flock_addr)
7064 {
7065     struct target_flock64 *target_fl;
7066     int l_type;
7067 
7068     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7069         return -TARGET_EFAULT;
7070     }
7071 
7072     __get_user(l_type, &target_fl->l_type);
7073     l_type = target_to_host_flock(l_type);
7074     if (l_type < 0) {
7075         return l_type;
7076     }
7077     fl->l_type = l_type;
7078     __get_user(fl->l_whence, &target_fl->l_whence);
7079     __get_user(fl->l_start, &target_fl->l_start);
7080     __get_user(fl->l_len, &target_fl->l_len);
7081     __get_user(fl->l_pid, &target_fl->l_pid);
7082     unlock_user_struct(target_fl, target_flock_addr, 0);
7083     return 0;
7084 }
7085 
7086 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7087                                             const struct flock64 *fl)
7088 {
7089     struct target_flock64 *target_fl;
7090     short l_type;
7091 
7092     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7093         return -TARGET_EFAULT;
7094     }
7095 
7096     l_type = host_to_target_flock(fl->l_type);
7097     __put_user(l_type, &target_fl->l_type);
7098     __put_user(fl->l_whence, &target_fl->l_whence);
7099     __put_user(fl->l_start, &target_fl->l_start);
7100     __put_user(fl->l_len, &target_fl->l_len);
7101     __put_user(fl->l_pid, &target_fl->l_pid);
7102     unlock_user_struct(target_fl, target_flock_addr, 1);
7103     return 0;
7104 }
7105 
7106 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7107 {
7108     struct flock64 fl64;
7109 #ifdef F_GETOWN_EX
7110     struct f_owner_ex fox;
7111     struct target_f_owner_ex *target_fox;
7112 #endif
7113     abi_long ret;
7114     int host_cmd = target_to_host_fcntl_cmd(cmd);
7115 
7116     if (host_cmd == -TARGET_EINVAL)
7117         return host_cmd;
7118 
7119     switch(cmd) {
7120     case TARGET_F_GETLK:
7121         ret = copy_from_user_flock(&fl64, arg);
7122         if (ret) {
7123             return ret;
7124         }
7125         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7126         if (ret == 0) {
7127             ret = copy_to_user_flock(arg, &fl64);
7128         }
7129         break;
7130 
7131     case TARGET_F_SETLK:
7132     case TARGET_F_SETLKW:
7133         ret = copy_from_user_flock(&fl64, arg);
7134         if (ret) {
7135             return ret;
7136         }
7137         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7138         break;
7139 
7140     case TARGET_F_GETLK64:
7141     case TARGET_F_OFD_GETLK:
7142         ret = copy_from_user_flock64(&fl64, arg);
7143         if (ret) {
7144             return ret;
7145         }
7146         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7147         if (ret == 0) {
7148             ret = copy_to_user_flock64(arg, &fl64);
7149         }
7150         break;
7151     case TARGET_F_SETLK64:
7152     case TARGET_F_SETLKW64:
7153     case TARGET_F_OFD_SETLK:
7154     case TARGET_F_OFD_SETLKW:
7155         ret = copy_from_user_flock64(&fl64, arg);
7156         if (ret) {
7157             return ret;
7158         }
7159         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7160         break;
7161 
7162     case TARGET_F_GETFL:
7163         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7164         if (ret >= 0) {
7165             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7166             /* Tell 32-bit guests the fd uses O_LARGEFILE on 64-bit hosts. */
7167             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7168                 ret |= TARGET_O_LARGEFILE;
7169             }
7170         }
7171         break;
7172 
7173     case TARGET_F_SETFL:
7174         ret = get_errno(safe_fcntl(fd, host_cmd,
7175                                    target_to_host_bitmask(arg,
7176                                                           fcntl_flags_tbl)));
7177         break;
7178 
7179 #ifdef F_GETOWN_EX
7180     case TARGET_F_GETOWN_EX:
7181         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7182         if (ret >= 0) {
7183             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7184                 return -TARGET_EFAULT;
7185             target_fox->type = tswap32(fox.type);
7186             target_fox->pid = tswap32(fox.pid);
7187             unlock_user_struct(target_fox, arg, 1);
7188         }
7189         break;
7190 #endif
7191 
7192 #ifdef F_SETOWN_EX
7193     case TARGET_F_SETOWN_EX:
7194         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7195             return -TARGET_EFAULT;
7196         fox.type = tswap32(target_fox->type);
7197         fox.pid = tswap32(target_fox->pid);
7198         unlock_user_struct(target_fox, arg, 0);
7199         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7200         break;
7201 #endif
7202 
7203     case TARGET_F_SETSIG:
7204         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7205         break;
7206 
7207     case TARGET_F_GETSIG:
7208         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7209         break;
7210 
7211     case TARGET_F_SETOWN:
7212     case TARGET_F_GETOWN:
7213     case TARGET_F_SETLEASE:
7214     case TARGET_F_GETLEASE:
7215     case TARGET_F_SETPIPE_SZ:
7216     case TARGET_F_GETPIPE_SZ:
7217     case TARGET_F_ADD_SEALS:
7218     case TARGET_F_GET_SEALS:
7219         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7220         break;
7221 
7222     default:
7223         ret = get_errno(safe_fcntl(fd, cmd, arg));
7224         break;
7225     }
7226     return ret;
7227 }
7228 
7229 #ifdef USE_UID16
7230 
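/*
 * For the 16-bit UID/GID syscalls, IDs that do not fit in 16 bits are
 * reported as the overflow ID 65534 (the kernel's default overflowuid/
 * overflowgid), which is what high2lowuid() and high2lowgid() implement.
 */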
7231 static inline int high2lowuid(int uid)
7232 {
7233     if (uid > 65535)
7234         return 65534;
7235     else
7236         return uid;
7237 }
7238 
7239 static inline int high2lowgid(int gid)
7240 {
7241     if (gid > 65535)
7242         return 65534;
7243     else
7244         return gid;
7245 }
7246 
7247 static inline int low2highuid(int uid)
7248 {
7249     if ((int16_t)uid == -1)
7250         return -1;
7251     else
7252         return uid;
7253 }
7254 
7255 static inline int low2highgid(int gid)
7256 {
7257     if ((int16_t)gid == -1)
7258         return -1;
7259     else
7260         return gid;
7261 }
7262 static inline int tswapid(int id)
7263 {
7264     return tswap16(id);
7265 }
7266 
7267 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7268 
7269 #else /* !USE_UID16 */
7270 static inline int high2lowuid(int uid)
7271 {
7272     return uid;
7273 }
7274 static inline int high2lowgid(int gid)
7275 {
7276     return gid;
7277 }
7278 static inline int low2highuid(int uid)
7279 {
7280     return uid;
7281 }
7282 static inline int low2highgid(int gid)
7283 {
7284     return gid;
7285 }
7286 static inline int tswapid(int id)
7287 {
7288     return tswap32(id);
7289 }
7290 
7291 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7292 
7293 #endif /* USE_UID16 */
7294 
7295 /* We must do direct syscalls for setting UID/GID, because we want to
7296  * implement the Linux system call semantics of "change only for this thread",
7297  * not the libc/POSIX semantics of "change for all threads in process".
7298  * (See http://ewontfix.com/17/ for more details.)
7299  * We use the 32-bit version of the syscalls if present; if it is not
7300  * then either the host architecture supports 32-bit UIDs natively with
7301  * the standard syscall, or the 16-bit UID is the best we can do.
7302  */
7303 #ifdef __NR_setuid32
7304 #define __NR_sys_setuid __NR_setuid32
7305 #else
7306 #define __NR_sys_setuid __NR_setuid
7307 #endif
7308 #ifdef __NR_setgid32
7309 #define __NR_sys_setgid __NR_setgid32
7310 #else
7311 #define __NR_sys_setgid __NR_setgid
7312 #endif
7313 #ifdef __NR_setresuid32
7314 #define __NR_sys_setresuid __NR_setresuid32
7315 #else
7316 #define __NR_sys_setresuid __NR_setresuid
7317 #endif
7318 #ifdef __NR_setresgid32
7319 #define __NR_sys_setresgid __NR_setresgid32
7320 #else
7321 #define __NR_sys_setresgid __NR_setresgid
7322 #endif
7323 
7324 _syscall1(int, sys_setuid, uid_t, uid)
7325 _syscall1(int, sys_setgid, gid_t, gid)
7326 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7327 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7328 
7329 void syscall_init(void)
7330 {
7331     IOCTLEntry *ie;
7332     const argtype *arg_type;
7333     int size;
7334 
7335     thunk_init(STRUCT_MAX);
7336 
7337 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7338 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7339 #include "syscall_types.h"
7340 #undef STRUCT
7341 #undef STRUCT_SPECIAL
7342 
7343     /* We patch the ioctl size if necessary.  We rely on the fact that
7344        no ioctl has all bits set to '1' in the size field. */
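    /* An all-ones size field marks entries whose real size is computed here
     * from the thunk description of the structure the argument points to. */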
7345     ie = ioctl_entries;
7346     while (ie->target_cmd != 0) {
7347         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7348             TARGET_IOC_SIZEMASK) {
7349             arg_type = ie->arg_type;
7350             if (arg_type[0] != TYPE_PTR) {
7351                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7352                         ie->target_cmd);
7353                 exit(1);
7354             }
7355             arg_type++;
7356             size = thunk_type_size(arg_type, 0);
7357             ie->target_cmd = (ie->target_cmd &
7358                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7359                 (size << TARGET_IOC_SIZESHIFT);
7360         }
7361 
7362         /* automatic consistency check if same arch */
7363 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7364     (defined(__x86_64__) && defined(TARGET_X86_64))
7365         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7366             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7367                     ie->name, ie->target_cmd, ie->host_cmd);
7368         }
7369 #endif
7370         ie++;
7371     }
7372 }
7373 
7374 #ifdef TARGET_NR_truncate64
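/*
 * On ABIs where regpairs_aligned() is true, a 64-bit argument is passed in
 * an aligned register pair, so its low/high halves arrive one slot later
 * than usual; hence the arg2/arg3 <- arg3/arg4 shuffle below.
 */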
7375 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7376                                          abi_long arg2,
7377                                          abi_long arg3,
7378                                          abi_long arg4)
7379 {
7380     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7381         arg2 = arg3;
7382         arg3 = arg4;
7383     }
7384     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7385 }
7386 #endif
7387 
7388 #ifdef TARGET_NR_ftruncate64
7389 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7390                                           abi_long arg2,
7391                                           abi_long arg3,
7392                                           abi_long arg4)
7393 {
7394     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7395         arg2 = arg3;
7396         arg3 = arg4;
7397     }
7398     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7399 }
7400 #endif
7401 
7402 #if defined(TARGET_NR_timer_settime) || \
7403     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7404 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7405                                                  abi_ulong target_addr)
7406 {
7407     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7408                                 offsetof(struct target_itimerspec,
7409                                          it_interval)) ||
7410         target_to_host_timespec(&host_its->it_value, target_addr +
7411                                 offsetof(struct target_itimerspec,
7412                                          it_value))) {
7413         return -TARGET_EFAULT;
7414     }
7415 
7416     return 0;
7417 }
7418 #endif
7419 
7420 #if defined(TARGET_NR_timer_settime64) || \
7421     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7422 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7423                                                    abi_ulong target_addr)
7424 {
7425     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7426                                   offsetof(struct target__kernel_itimerspec,
7427                                            it_interval)) ||
7428         target_to_host_timespec64(&host_its->it_value, target_addr +
7429                                   offsetof(struct target__kernel_itimerspec,
7430                                            it_value))) {
7431         return -TARGET_EFAULT;
7432     }
7433 
7434     return 0;
7435 }
7436 #endif
7437 
7438 #if ((defined(TARGET_NR_timerfd_gettime) || \
7439       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7440       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7441 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7442                                                  struct itimerspec *host_its)
7443 {
7444     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7445                                                        it_interval),
7446                                 &host_its->it_interval) ||
7447         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7448                                                        it_value),
7449                                 &host_its->it_value)) {
7450         return -TARGET_EFAULT;
7451     }
7452     return 0;
7453 }
7454 #endif
7455 
7456 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7457       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7458       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7459 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7460                                                    struct itimerspec *host_its)
7461 {
7462     if (host_to_target_timespec64(target_addr +
7463                                   offsetof(struct target__kernel_itimerspec,
7464                                            it_interval),
7465                                   &host_its->it_interval) ||
7466         host_to_target_timespec64(target_addr +
7467                                   offsetof(struct target__kernel_itimerspec,
7468                                            it_value),
7469                                   &host_its->it_value)) {
7470         return -TARGET_EFAULT;
7471     }
7472     return 0;
7473 }
7474 #endif
7475 
7476 #if defined(TARGET_NR_adjtimex) || \
7477     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7478 static inline abi_long target_to_host_timex(struct timex *host_tx,
7479                                             abi_long target_addr)
7480 {
7481     struct target_timex *target_tx;
7482 
7483     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7484         return -TARGET_EFAULT;
7485     }
7486 
7487     __get_user(host_tx->modes, &target_tx->modes);
7488     __get_user(host_tx->offset, &target_tx->offset);
7489     __get_user(host_tx->freq, &target_tx->freq);
7490     __get_user(host_tx->maxerror, &target_tx->maxerror);
7491     __get_user(host_tx->esterror, &target_tx->esterror);
7492     __get_user(host_tx->status, &target_tx->status);
7493     __get_user(host_tx->constant, &target_tx->constant);
7494     __get_user(host_tx->precision, &target_tx->precision);
7495     __get_user(host_tx->tolerance, &target_tx->tolerance);
7496     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7497     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7498     __get_user(host_tx->tick, &target_tx->tick);
7499     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7500     __get_user(host_tx->jitter, &target_tx->jitter);
7501     __get_user(host_tx->shift, &target_tx->shift);
7502     __get_user(host_tx->stabil, &target_tx->stabil);
7503     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7504     __get_user(host_tx->calcnt, &target_tx->calcnt);
7505     __get_user(host_tx->errcnt, &target_tx->errcnt);
7506     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7507     __get_user(host_tx->tai, &target_tx->tai);
7508 
7509     unlock_user_struct(target_tx, target_addr, 0);
7510     return 0;
7511 }
7512 
7513 static inline abi_long host_to_target_timex(abi_long target_addr,
7514                                             struct timex *host_tx)
7515 {
7516     struct target_timex *target_tx;
7517 
7518     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7519         return -TARGET_EFAULT;
7520     }
7521 
7522     __put_user(host_tx->modes, &target_tx->modes);
7523     __put_user(host_tx->offset, &target_tx->offset);
7524     __put_user(host_tx->freq, &target_tx->freq);
7525     __put_user(host_tx->maxerror, &target_tx->maxerror);
7526     __put_user(host_tx->esterror, &target_tx->esterror);
7527     __put_user(host_tx->status, &target_tx->status);
7528     __put_user(host_tx->constant, &target_tx->constant);
7529     __put_user(host_tx->precision, &target_tx->precision);
7530     __put_user(host_tx->tolerance, &target_tx->tolerance);
7531     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7532     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7533     __put_user(host_tx->tick, &target_tx->tick);
7534     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7535     __put_user(host_tx->jitter, &target_tx->jitter);
7536     __put_user(host_tx->shift, &target_tx->shift);
7537     __put_user(host_tx->stabil, &target_tx->stabil);
7538     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7539     __put_user(host_tx->calcnt, &target_tx->calcnt);
7540     __put_user(host_tx->errcnt, &target_tx->errcnt);
7541     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7542     __put_user(host_tx->tai, &target_tx->tai);
7543 
7544     unlock_user_struct(target_tx, target_addr, 1);
7545     return 0;
7546 }
7547 #endif
7548 
7549 
7550 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7551 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7552                                               abi_long target_addr)
7553 {
7554     struct target__kernel_timex *target_tx;
7555 
7556     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7557                                  offsetof(struct target__kernel_timex,
7558                                           time))) {
7559         return -TARGET_EFAULT;
7560     }
7561 
7562     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7563         return -TARGET_EFAULT;
7564     }
7565 
7566     __get_user(host_tx->modes, &target_tx->modes);
7567     __get_user(host_tx->offset, &target_tx->offset);
7568     __get_user(host_tx->freq, &target_tx->freq);
7569     __get_user(host_tx->maxerror, &target_tx->maxerror);
7570     __get_user(host_tx->esterror, &target_tx->esterror);
7571     __get_user(host_tx->status, &target_tx->status);
7572     __get_user(host_tx->constant, &target_tx->constant);
7573     __get_user(host_tx->precision, &target_tx->precision);
7574     __get_user(host_tx->tolerance, &target_tx->tolerance);
7575     __get_user(host_tx->tick, &target_tx->tick);
7576     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7577     __get_user(host_tx->jitter, &target_tx->jitter);
7578     __get_user(host_tx->shift, &target_tx->shift);
7579     __get_user(host_tx->stabil, &target_tx->stabil);
7580     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7581     __get_user(host_tx->calcnt, &target_tx->calcnt);
7582     __get_user(host_tx->errcnt, &target_tx->errcnt);
7583     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7584     __get_user(host_tx->tai, &target_tx->tai);
7585 
7586     unlock_user_struct(target_tx, target_addr, 0);
7587     return 0;
7588 }
7589 
7590 static inline abi_long host_to_target_timex64(abi_long target_addr,
7591                                               struct timex *host_tx)
7592 {
7593     struct target__kernel_timex *target_tx;
7594 
7595     if (copy_to_user_timeval64(target_addr +
7596                                offsetof(struct target__kernel_timex, time),
7597                                &host_tx->time)) {
7598         return -TARGET_EFAULT;
7599     }
7600 
7601     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7602         return -TARGET_EFAULT;
7603     }
7604 
7605     __put_user(host_tx->modes, &target_tx->modes);
7606     __put_user(host_tx->offset, &target_tx->offset);
7607     __put_user(host_tx->freq, &target_tx->freq);
7608     __put_user(host_tx->maxerror, &target_tx->maxerror);
7609     __put_user(host_tx->esterror, &target_tx->esterror);
7610     __put_user(host_tx->status, &target_tx->status);
7611     __put_user(host_tx->constant, &target_tx->constant);
7612     __put_user(host_tx->precision, &target_tx->precision);
7613     __put_user(host_tx->tolerance, &target_tx->tolerance);
7614     __put_user(host_tx->tick, &target_tx->tick);
7615     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7616     __put_user(host_tx->jitter, &target_tx->jitter);
7617     __put_user(host_tx->shift, &target_tx->shift);
7618     __put_user(host_tx->stabil, &target_tx->stabil);
7619     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7620     __put_user(host_tx->calcnt, &target_tx->calcnt);
7621     __put_user(host_tx->errcnt, &target_tx->errcnt);
7622     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7623     __put_user(host_tx->tai, &target_tx->tai);
7624 
7625     unlock_user_struct(target_tx, target_addr, 1);
7626     return 0;
7627 }
7628 #endif
7629 
7630 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7631 #define sigev_notify_thread_id _sigev_un._tid
7632 #endif
7633 
7634 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7635                                                abi_ulong target_addr)
7636 {
7637     struct target_sigevent *target_sevp;
7638 
7639     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7640         return -TARGET_EFAULT;
7641     }
7642 
7643     /* This union is awkward on 64 bit systems because it has a 32 bit
7644      * integer and a pointer in it; we follow the conversion approach
7645      * used for handling sigval types in signal.c so the guest should get
7646      * the correct value back even if we did a 64 bit byteswap and it's
7647      * using the 32 bit integer.
7648      */
7649     host_sevp->sigev_value.sival_ptr =
7650         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7651     host_sevp->sigev_signo =
7652         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7653     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7654     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7655 
7656     unlock_user_struct(target_sevp, target_addr, 1);
7657     return 0;
7658 }
7659 
7660 #if defined(TARGET_NR_mlockall)
7661 static inline int target_to_host_mlockall_arg(int arg)
7662 {
7663     int result = 0;
7664 
7665     if (arg & TARGET_MCL_CURRENT) {
7666         result |= MCL_CURRENT;
7667     }
7668     if (arg & TARGET_MCL_FUTURE) {
7669         result |= MCL_FUTURE;
7670     }
7671 #ifdef MCL_ONFAULT
7672     if (arg & TARGET_MCL_ONFAULT) {
7673         result |= MCL_ONFAULT;
7674     }
7675 #endif
7676 
7677     return result;
7678 }
7679 #endif
7680 
7681 static inline int target_to_host_msync_arg(abi_long arg)
7682 {
7683     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7684            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7685            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7686            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7687 }
7688 
7689 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7690      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7691      defined(TARGET_NR_newfstatat))
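/*
 * Write a host struct stat back to the guest in its stat64 layout.
 * On 32-bit ARM the EABI variant of the structure is used when the CPU is
 * running an EABI binary; other targets use target_stat64 (or plain
 * target_stat when no 64-bit layout is defined).  Nanosecond timestamps are
 * copied only when the host provides st_atim and friends.
 */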
7692 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7693                                              abi_ulong target_addr,
7694                                              struct stat *host_st)
7695 {
7696 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7697     if (cpu_env->eabi) {
7698         struct target_eabi_stat64 *target_st;
7699 
7700         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7701             return -TARGET_EFAULT;
7702         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7703         __put_user(host_st->st_dev, &target_st->st_dev);
7704         __put_user(host_st->st_ino, &target_st->st_ino);
7705 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7706         __put_user(host_st->st_ino, &target_st->__st_ino);
7707 #endif
7708         __put_user(host_st->st_mode, &target_st->st_mode);
7709         __put_user(host_st->st_nlink, &target_st->st_nlink);
7710         __put_user(host_st->st_uid, &target_st->st_uid);
7711         __put_user(host_st->st_gid, &target_st->st_gid);
7712         __put_user(host_st->st_rdev, &target_st->st_rdev);
7713         __put_user(host_st->st_size, &target_st->st_size);
7714         __put_user(host_st->st_blksize, &target_st->st_blksize);
7715         __put_user(host_st->st_blocks, &target_st->st_blocks);
7716         __put_user(host_st->st_atime, &target_st->target_st_atime);
7717         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7718         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7719 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7720         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7721         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7722         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7723 #endif
7724         unlock_user_struct(target_st, target_addr, 1);
7725     } else
7726 #endif
7727     {
7728 #if defined(TARGET_HAS_STRUCT_STAT64)
7729         struct target_stat64 *target_st;
7730 #else
7731         struct target_stat *target_st;
7732 #endif
7733 
7734         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7735             return -TARGET_EFAULT;
7736         memset(target_st, 0, sizeof(*target_st));
7737         __put_user(host_st->st_dev, &target_st->st_dev);
7738         __put_user(host_st->st_ino, &target_st->st_ino);
7739 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7740         __put_user(host_st->st_ino, &target_st->__st_ino);
7741 #endif
7742         __put_user(host_st->st_mode, &target_st->st_mode);
7743         __put_user(host_st->st_nlink, &target_st->st_nlink);
7744         __put_user(host_st->st_uid, &target_st->st_uid);
7745         __put_user(host_st->st_gid, &target_st->st_gid);
7746         __put_user(host_st->st_rdev, &target_st->st_rdev);
7747         /* XXX: better use of kernel struct */
7748         __put_user(host_st->st_size, &target_st->st_size);
7749         __put_user(host_st->st_blksize, &target_st->st_blksize);
7750         __put_user(host_st->st_blocks, &target_st->st_blocks);
7751         __put_user(host_st->st_atime, &target_st->target_st_atime);
7752         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7753         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7754 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7755         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7756         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7757         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7758 #endif
7759         unlock_user_struct(target_st, target_addr, 1);
7760     }
7761 
7762     return 0;
7763 }
7764 #endif
7765 
7766 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7767 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7768                                             abi_ulong target_addr)
7769 {
7770     struct target_statx *target_stx;
7771 
7772     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7773         return -TARGET_EFAULT;
7774     }
7775     memset(target_stx, 0, sizeof(*target_stx));
7776 
7777     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7778     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7779     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7780     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7781     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7782     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7783     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7784     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7785     __put_user(host_stx->stx_size, &target_stx->stx_size);
7786     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7787     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7788     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7789     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7790     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7791     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7792     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7793     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7794     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7795     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7796     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7797     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7798     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7799     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7800 
7801     unlock_user_struct(target_stx, target_addr, 1);
7802 
7803     return 0;
7804 }
7805 #endif
7806 
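/*
 * Pick the host futex syscall matching the width of the timespec we were
 * given: 64-bit hosts only have __NR_futex (with 64-bit time_t), while
 * 32-bit hosts may need __NR_futex_time64 when the C library's struct
 * timespec carries a 64-bit tv_sec.  do_sys_futex() returns the raw result;
 * do_safe_futex() goes through the safe_ wrappers and converts the result
 * with get_errno().
 */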
7807 static int do_sys_futex(int *uaddr, int op, int val,
7808                          const struct timespec *timeout, int *uaddr2,
7809                          int val3)
7810 {
7811 #if HOST_LONG_BITS == 64
7812 #if defined(__NR_futex)
7813     /* on 64-bit hosts time_t is always 64-bit, so no _time64 variant is defined */
7814     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7815 
7816 #endif
7817 #else /* HOST_LONG_BITS == 64 */
7818 #if defined(__NR_futex_time64)
7819     if (sizeof(timeout->tv_sec) == 8) {
7820         /* _time64 function on 32bit arch */
7821         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7822     }
7823 #endif
7824 #if defined(__NR_futex)
7825     /* old function on 32bit arch */
7826     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7827 #endif
7828 #endif /* HOST_LONG_BITS == 64 */
7829     g_assert_not_reached();
7830 }
7831 
7832 static int do_safe_futex(int *uaddr, int op, int val,
7833                          const struct timespec *timeout, int *uaddr2,
7834                          int val3)
7835 {
7836 #if HOST_LONG_BITS == 64
7837 #if defined(__NR_futex)
7838     /* on 64-bit hosts time_t is always 64-bit, so no _time64 variant is defined */
7839     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7840 #endif
7841 #else /* HOST_LONG_BITS == 64 */
7842 #if defined(__NR_futex_time64)
7843     if (sizeof(timeout->tv_sec) == 8) {
7844         /* _time64 function on 32bit arch */
7845         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7846                                            val3));
7847     }
7848 #endif
7849 #if defined(__NR_futex)
7850     /* old function on 32bit arch */
7851     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7852 #endif
7853 #endif /* HOST_LONG_BITS == 64 */
7854     return -TARGET_ENOSYS;
7855 }
7856 
7857 /* ??? Using host futex calls even when target atomic operations
7858    are not really atomic probably breaks things.  However, implementing
7859    futexes locally would make futexes shared between multiple processes
7860    tricky.  In any case they're probably useless, because guest atomic
7861    operations won't work either.  */
7862 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
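/*
 * Dispatch a guest futex call.  For the wait-style operations VAL (and for
 * CMP_REQUEUE also VAL3) is byteswapped, because the kernel compares it
 * against the guest's memory word; the timeout is only converted for
 * operations that can actually sleep.  For REQUEUE/WAKE_OP the fourth
 * argument is really VAL2, passed through the timeout pointer slot, e.g. a
 * guest call of the form (illustrative only):
 *
 *     futex(uaddr, FUTEX_CMP_REQUEUE, 1, (void *)nr_requeue, uaddr2, val3);
 */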
7863 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7864                     int op, int val, target_ulong timeout,
7865                     target_ulong uaddr2, int val3)
7866 {
7867     struct timespec ts, *pts = NULL;
7868     void *haddr2 = NULL;
7869     int base_op;
7870 
7871     /* We assume FUTEX_* constants are the same on both host and target. */
7872 #ifdef FUTEX_CMD_MASK
7873     base_op = op & FUTEX_CMD_MASK;
7874 #else
7875     base_op = op;
7876 #endif
7877     switch (base_op) {
7878     case FUTEX_WAIT:
7879     case FUTEX_WAIT_BITSET:
7880         val = tswap32(val);
7881         break;
7882     case FUTEX_WAIT_REQUEUE_PI:
7883         val = tswap32(val);
7884         haddr2 = g2h(cpu, uaddr2);
7885         break;
7886     case FUTEX_LOCK_PI:
7887     case FUTEX_LOCK_PI2:
7888         break;
7889     case FUTEX_WAKE:
7890     case FUTEX_WAKE_BITSET:
7891     case FUTEX_TRYLOCK_PI:
7892     case FUTEX_UNLOCK_PI:
7893         timeout = 0;
7894         break;
7895     case FUTEX_FD:
7896         val = target_to_host_signal(val);
7897         timeout = 0;
7898         break;
7899     case FUTEX_CMP_REQUEUE:
7900     case FUTEX_CMP_REQUEUE_PI:
7901         val3 = tswap32(val3);
7902         /* fall through */
7903     case FUTEX_REQUEUE:
7904     case FUTEX_WAKE_OP:
7905         /*
7906          * For these, the 4th argument is not TIMEOUT, but VAL2.
7907          * But the prototype of do_safe_futex takes a pointer, so
7908          * insert casts to satisfy the compiler.  We do not need
7909          * to tswap VAL2 since it's not compared to guest memory.
7910           */
7911         pts = (struct timespec *)(uintptr_t)timeout;
7912         timeout = 0;
7913         haddr2 = g2h(cpu, uaddr2);
7914         break;
7915     default:
7916         return -TARGET_ENOSYS;
7917     }
7918     if (timeout) {
7919         pts = &ts;
7920         if (time64
7921             ? target_to_host_timespec64(pts, timeout)
7922             : target_to_host_timespec(pts, timeout)) {
7923             return -TARGET_EFAULT;
7924         }
7925     }
7926     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7927 }
7928 #endif
7929 
7930 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7931 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7932                                      abi_long handle, abi_long mount_id,
7933                                      abi_long flags)
7934 {
7935     struct file_handle *target_fh;
7936     struct file_handle *fh;
7937     int mid = 0;
7938     abi_long ret;
7939     char *name;
7940     unsigned int size, total_size;
7941 
7942     if (get_user_s32(size, handle)) {
7943         return -TARGET_EFAULT;
7944     }
7945 
7946     name = lock_user_string(pathname);
7947     if (!name) {
7948         return -TARGET_EFAULT;
7949     }
7950 
7951     total_size = sizeof(struct file_handle) + size;
7952     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7953     if (!target_fh) {
7954         unlock_user(name, pathname, 0);
7955         return -TARGET_EFAULT;
7956     }
7957 
7958     fh = g_malloc0(total_size);
7959     fh->handle_bytes = size;
7960 
7961     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7962     unlock_user(name, pathname, 0);
7963 
7964     /* man name_to_handle_at(2):
7965      * Other than the use of the handle_bytes field, the caller should treat
7966      * the file_handle structure as an opaque data type
7967      */
7968 
7969     memcpy(target_fh, fh, total_size);
7970     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7971     target_fh->handle_type = tswap32(fh->handle_type);
7972     g_free(fh);
7973     unlock_user(target_fh, handle, total_size);
7974 
7975     if (put_user_s32(mid, mount_id)) {
7976         return -TARGET_EFAULT;
7977     }
7978 
7979     return ret;
7980 
7981 }
7982 #endif
7983 
7984 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7985 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7986                                      abi_long flags)
7987 {
7988     struct file_handle *target_fh;
7989     struct file_handle *fh;
7990     unsigned int size, total_size;
7991     abi_long ret;
7992 
7993     if (get_user_s32(size, handle)) {
7994         return -TARGET_EFAULT;
7995     }
7996 
7997     total_size = sizeof(struct file_handle) + size;
7998     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7999     if (!target_fh) {
8000         return -TARGET_EFAULT;
8001     }
8002 
8003     fh = g_memdup(target_fh, total_size);
8004     fh->handle_bytes = size;
8005     fh->handle_type = tswap32(target_fh->handle_type);
8006 
8007     ret = get_errno(open_by_handle_at(mount_fd, fh,
8008                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
8009 
8010     g_free(fh);
8011 
8012     unlock_user(target_fh, handle, total_size);
8013 
8014     return ret;
8015 }
8016 #endif
8017 
8018 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
8019 
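/*
 * signalfd()/signalfd4(): convert the guest signal mask and O_* flags to
 * their host values, then register a file-descriptor translator so that the
 * signalfd_siginfo records read back from the descriptor are converted to
 * the guest layout (see target_signalfd_trans).
 */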
8020 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
8021 {
8022     int host_flags;
8023     target_sigset_t *target_mask;
8024     sigset_t host_mask;
8025     abi_long ret;
8026 
8027     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
8028         return -TARGET_EINVAL;
8029     }
8030     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
8031         return -TARGET_EFAULT;
8032     }
8033 
8034     target_to_host_sigset(&host_mask, target_mask);
8035 
8036     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8037 
8038     ret = get_errno(signalfd(fd, &host_mask, host_flags));
8039     if (ret >= 0) {
8040         fd_trans_register(ret, &target_signalfd_trans);
8041     }
8042 
8043     unlock_user_struct(target_mask, mask, 0);
8044 
8045     return ret;
8046 }
8047 #endif
8048 
8049 /* Map host to target signal numbers for the wait family of syscalls.
8050    Assume all other status bits are the same.  */
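/* For reference (generic POSIX wait-status layout, not QEMU-specific): a
   termination signal occupies the low 7 bits of the status word and a stop
   signal bits 8-15, which is why only those fields need remapping here.  */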
8051 int host_to_target_waitstatus(int status)
8052 {
8053     if (WIFSIGNALED(status)) {
8054         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8055     }
8056     if (WIFSTOPPED(status)) {
8057         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8058                | (status & 0xff);
8059     }
8060     return status;
8061 }
8062 
8063 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8064 {
8065     CPUState *cpu = env_cpu(cpu_env);
8066     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8067     int i;
8068 
8069     for (i = 0; i < bprm->argc; i++) {
8070         size_t len = strlen(bprm->argv[i]) + 1;
8071 
8072         if (write(fd, bprm->argv[i], len) != len) {
8073             return -1;
8074         }
8075     }
8076 
8077     return 0;
8078 }
8079 
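/*
 * Emit one synthetic /proc/self/smaps entry.  Only Size, KernelPageSize and
 * MMUPageSize are derived from the mapping; the accounting counters are not
 * tracked by QEMU and are simply reported as 0 kB.
 */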
8080 static void show_smaps(int fd, unsigned long size)
8081 {
8082     unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8083     unsigned long size_kb = size >> 10;
8084 
8085     dprintf(fd, "Size:                  %lu kB\n"
8086                 "KernelPageSize:        %lu kB\n"
8087                 "MMUPageSize:           %lu kB\n"
8088                 "Rss:                   0 kB\n"
8089                 "Pss:                   0 kB\n"
8090                 "Pss_Dirty:             0 kB\n"
8091                 "Shared_Clean:          0 kB\n"
8092                 "Shared_Dirty:          0 kB\n"
8093                 "Private_Clean:         0 kB\n"
8094                 "Private_Dirty:         0 kB\n"
8095                 "Referenced:            0 kB\n"
8096                 "Anonymous:             0 kB\n"
8097                 "LazyFree:              0 kB\n"
8098                 "AnonHugePages:         0 kB\n"
8099                 "ShmemPmdMapped:        0 kB\n"
8100                 "FilePmdMapped:         0 kB\n"
8101                 "Shared_Hugetlb:        0 kB\n"
8102                 "Private_Hugetlb:       0 kB\n"
8103                 "Swap:                  0 kB\n"
8104                 "SwapPss:               0 kB\n"
8105                 "Locked:                0 kB\n"
8106                 "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
8107 }
8108 
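/*
 * Build /proc/self/maps (and, with smaps=true, /proc/self/smaps) for the
 * guest: walk the host's own mappings, keep only ranges that are valid guest
 * addresses, and print them with the guest's page protection flags.  The
 * guest stack is labelled "[stack]" and, where configured, a synthetic
 * [vsyscall] entry is appended.
 */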
8109 static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
8110 {
8111     CPUState *cpu = env_cpu(cpu_env);
8112     TaskState *ts = cpu->opaque;
8113     GSList *map_info = read_self_maps();
8114     GSList *s;
8115     int count;
8116 
8117     for (s = map_info; s; s = g_slist_next(s)) {
8118         MapInfo *e = (MapInfo *) s->data;
8119 
8120         if (h2g_valid(e->start)) {
8121             unsigned long min = e->start;
8122             unsigned long max = e->end;
8123             int flags = page_get_flags(h2g(min));
8124             const char *path;
8125 
8126             max = h2g_valid(max - 1) ?
8127                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8128 
8129             if (!page_check_range(h2g(min), max - min, flags)) {
8130                 continue;
8131             }
8132 
8133 #ifdef TARGET_HPPA
8134             if (h2g(max) == ts->info->stack_limit) {
8135 #else
8136             if (h2g(min) == ts->info->stack_limit) {
8137 #endif
8138                 path = "[stack]";
8139             } else {
8140                 path = e->path;
8141             }
8142 
8143             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8144                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8145                             h2g(min), h2g(max - 1) + 1,
8146                             (flags & PAGE_READ) ? 'r' : '-',
8147                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8148                             (flags & PAGE_EXEC) ? 'x' : '-',
8149                             e->is_priv ? 'p' : 's',
8150                             (uint64_t) e->offset, e->dev, e->inode);
8151             if (path) {
8152                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8153             } else {
8154                 dprintf(fd, "\n");
8155             }
8156             if (smaps) {
8157                 show_smaps(fd, max - min);
8158                 dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
8159                         (flags & PAGE_READ) ? " rd" : "",
8160                         (flags & PAGE_WRITE_ORG) ? " wr" : "",
8161                         (flags & PAGE_EXEC) ? " ex" : "",
8162                         e->is_priv ? "" : " sh",
8163                         (flags & PAGE_READ) ? " mr" : "",
8164                         (flags & PAGE_WRITE_ORG) ? " mw" : "",
8165                         (flags & PAGE_EXEC) ? " me" : "",
8166                         e->is_priv ? "" : " ms");
8167             }
8168         }
8169     }
8170 
8171     free_self_maps(map_info);
8172 
8173 #ifdef TARGET_VSYSCALL_PAGE
8174     /*
8175      * We only support execution from the vsyscall page.
8176      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8177      */
8178     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8179                     " --xp 00000000 00:00 0",
8180                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8181     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8182     if (smaps) {
8183         show_smaps(fd, TARGET_PAGE_SIZE);
8184         dprintf(fd, "VmFlags: ex\n");
8185     }
8186 #endif
8187 
8188     return 0;
8189 }
8190 
8191 static int open_self_maps(CPUArchState *cpu_env, int fd)
8192 {
8193     return open_self_maps_1(cpu_env, fd, false);
8194 }
8195 
8196 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8197 {
8198     return open_self_maps_1(cpu_env, fd, true);
8199 }
8200 
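/*
 * Synthesize /proc/self/stat: 44 space-separated fields, of which only the
 * pid, comm, state, ppid, starttime and start-of-stack values are filled in;
 * every other field is reported as 0.
 */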
8201 static int open_self_stat(CPUArchState *cpu_env, int fd)
8202 {
8203     CPUState *cpu = env_cpu(cpu_env);
8204     TaskState *ts = cpu->opaque;
8205     g_autoptr(GString) buf = g_string_new(NULL);
8206     int i;
8207 
8208     for (i = 0; i < 44; i++) {
8209         if (i == 0) {
8210             /* pid */
8211             g_string_printf(buf, FMT_pid " ", getpid());
8212         } else if (i == 1) {
8213             /* app name */
8214             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8215             bin = bin ? bin + 1 : ts->bprm->argv[0];
8216             g_string_printf(buf, "(%.15s) ", bin);
8217         } else if (i == 2) {
8218             /* task state */
8219             g_string_assign(buf, "R "); /* we are running right now */
8220         } else if (i == 3) {
8221             /* ppid */
8222             g_string_printf(buf, FMT_pid " ", getppid());
8223         } else if (i == 21) {
8224             /* starttime */
8225             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8226         } else if (i == 27) {
8227             /* stack bottom */
8228             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8229         } else {
8230             /* for the rest, there is MasterCard */
8231             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8232         }
8233 
8234         if (write(fd, buf->str, buf->len) != buf->len) {
8235             return -1;
8236         }
8237     }
8238 
8239     return 0;
8240 }
8241 
8242 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8243 {
8244     CPUState *cpu = env_cpu(cpu_env);
8245     TaskState *ts = cpu->opaque;
8246     abi_ulong auxv = ts->info->saved_auxv;
8247     abi_ulong len = ts->info->auxv_len;
8248     char *ptr;
8249 
8250     /*
8251      * The auxiliary vector is stored on the target process stack.
8252      * Read the whole auxv vector and copy it to the file.
8253      */
8254     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8255     if (ptr != NULL) {
8256         while (len > 0) {
8257             ssize_t r;
8258             r = write(fd, ptr, len);
8259             if (r <= 0) {
8260                 break;
8261             }
8262             len -= r;
8263             ptr += r;
8264         }
8265         lseek(fd, 0, SEEK_SET);
8266         unlock_user(ptr, auxv, len);
8267     }
8268 
8269     return 0;
8270 }
8271 
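/*
 * Return non-zero when FILENAME names the given ENTRY of this process's own
 * /proc directory, either via /proc/self/... or via /proc/<pid>/... with our
 * own pid.  For example, is_proc_myself("/proc/self/maps", "maps") is true,
 * while the same path for another pid is not.
 */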
8272 static int is_proc_myself(const char *filename, const char *entry)
8273 {
8274     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8275         filename += strlen("/proc/");
8276         if (!strncmp(filename, "self/", strlen("self/"))) {
8277             filename += strlen("self/");
8278         } else if (*filename >= '1' && *filename <= '9') {
8279             char myself[80];
8280             snprintf(myself, sizeof(myself), "%d/", getpid());
8281             if (!strncmp(filename, myself, strlen(myself))) {
8282                 filename += strlen(myself);
8283             } else {
8284                 return 0;
8285             }
8286         } else {
8287             return 0;
8288         }
8289         if (!strcmp(filename, entry)) {
8290             return 1;
8291         }
8292     }
8293     return 0;
8294 }
8295 
8296 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8297                       const char *fmt, int code)
8298 {
8299     if (logfile) {
8300         CPUState *cs = env_cpu(env);
8301 
8302         fprintf(logfile, fmt, code);
8303         fprintf(logfile, "Failing executable: %s\n", exec_path);
8304         cpu_dump_state(cs, logfile, 0);
8305         open_self_maps(env, fileno(logfile));
8306     }
8307 }
8308 
8309 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8310 {
8311     /* dump to console */
8312     excp_dump_file(stderr, env, fmt, code);
8313 
8314     /* dump to log file */
8315     if (qemu_log_separate()) {
8316         FILE *logfile = qemu_log_trylock();
8317 
8318         excp_dump_file(logfile, env, fmt, code);
8319         qemu_log_unlock(logfile);
8320     }
8321 }
8322 
8323 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8324     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8325     defined(TARGET_RISCV) || defined(TARGET_S390X)
8326 static int is_proc(const char *filename, const char *entry)
8327 {
8328     return strcmp(filename, entry) == 0;
8329 }
8330 #endif
8331 
8332 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8333 static int open_net_route(CPUArchState *cpu_env, int fd)
8334 {
8335     FILE *fp;
8336     char *line = NULL;
8337     size_t len = 0;
8338     ssize_t read;
8339 
8340     fp = fopen("/proc/net/route", "r");
8341     if (fp == NULL) {
8342         return -1;
8343     }
8344 
8345     /* read header */
8346 
8347     read = getline(&line, &len, fp);
8348     dprintf(fd, "%s", line);
8349 
8350     /* read routes */
8351 
8352     while ((read = getline(&line, &len, fp)) != -1) {
8353         char iface[16];
8354         uint32_t dest, gw, mask;
8355         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8356         int fields;
8357 
8358         fields = sscanf(line,
8359                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8360                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8361                         &mask, &mtu, &window, &irtt);
8362         if (fields != 11) {
8363             continue;
8364         }
8365         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8366                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8367                 metric, tswap32(mask), mtu, window, irtt);
8368     }
8369 
8370     free(line);
8371     fclose(fp);
8372 
8373     return 0;
8374 }
8375 #endif
8376 
8377 #if defined(TARGET_SPARC)
8378 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8379 {
8380     dprintf(fd, "type\t\t: sun4u\n");
8381     return 0;
8382 }
8383 #endif
8384 
8385 #if defined(TARGET_HPPA)
8386 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8387 {
8388     int i, num_cpus;
8389 
8390     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8391     for (i = 0; i < num_cpus; i++) {
8392         dprintf(fd, "processor\t: %d\n", i);
8393         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8394         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8395         dprintf(fd, "capabilities\t: os32\n");
8396         dprintf(fd, "model\t\t: 9000/778/B160L - "
8397                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8398     }
8399     return 0;
8400 }
8401 #endif
8402 
8403 #if defined(TARGET_RISCV)
8404 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8405 {
8406     int i;
8407     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8408     RISCVCPU *cpu = env_archcpu(cpu_env);
8409     const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
8410     char *isa_string = riscv_isa_string(cpu);
8411     const char *mmu;
8412 
8413     if (cfg->mmu) {
8414         mmu = (cpu_env->xl == MXL_RV32) ? "sv32"  : "sv48";
8415     } else {
8416         mmu = "none";
8417     }
8418 
8419     for (i = 0; i < num_cpus; i++) {
8420         dprintf(fd, "processor\t: %d\n", i);
8421         dprintf(fd, "hart\t\t: %d\n", i);
8422         dprintf(fd, "isa\t\t: %s\n", isa_string);
8423         dprintf(fd, "mmu\t\t: %s\n", mmu);
8424         dprintf(fd, "uarch\t\t: qemu\n\n");
8425     }
8426 
8427     g_free(isa_string);
8428     return 0;
8429 }
8430 #endif
8431 
8432 #if defined(TARGET_S390X)
8433 /*
8434  * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8435  * show in /proc/cpuinfo.
8436  *
8437  * Skip the following in order to match the missing support in op_ecag():
8438  * - show_cacheinfo().
8439  * - show_cpu_topology().
8440  * - show_cpu_mhz().
8441  *
8442  * Use fixed values for certain fields:
8443  * - bogomips per cpu - from a qemu-system-s390x run.
8444  * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8445  *
8446  * Keep the code structure close to arch/s390/kernel/processor.c.
8447  */
8448 
8449 static void show_facilities(int fd)
8450 {
8451     size_t sizeof_stfl_bytes = 2048;
8452     g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
8453     unsigned int bit;
8454 
8455     dprintf(fd, "facilities      :");
8456     s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
8457     for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
8458         if (test_be_bit(bit, stfl_bytes)) {
8459             dprintf(fd, " %d", bit);
8460         }
8461     }
8462     dprintf(fd, "\n");
8463 }
8464 
8465 static int cpu_ident(unsigned long n)
8466 {
8467     return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
8468                      n);
8469 }
8470 
8471 static void show_cpu_summary(CPUArchState *cpu_env, int fd)
8472 {
8473     S390CPUModel *model = env_archcpu(cpu_env)->model;
8474     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8475     uint32_t elf_hwcap = get_elf_hwcap();
8476     const char *hwcap_str;
8477     int i;
8478 
8479     dprintf(fd, "vendor_id       : IBM/S390\n"
8480                 "# processors    : %i\n"
8481                 "bogomips per cpu: 13370.00\n",
8482             num_cpus);
8483     dprintf(fd, "max thread id   : 0\n");
8484     dprintf(fd, "features\t: ");
8485     for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
8486         if (!(elf_hwcap & (1 << i))) {
8487             continue;
8488         }
8489         hwcap_str = elf_hwcap_str(i);
8490         if (hwcap_str) {
8491             dprintf(fd, "%s ", hwcap_str);
8492         }
8493     }
8494     dprintf(fd, "\n");
8495     show_facilities(fd);
8496     for (i = 0; i < num_cpus; i++) {
8497         dprintf(fd, "processor %d: "
8498                "version = %02X,  "
8499                "identification = %06X,  "
8500                "machine = %04X\n",
8501                i, model->cpu_ver, cpu_ident(i), model->def->type);
8502     }
8503 }
8504 
8505 static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
8506 {
8507     S390CPUModel *model = env_archcpu(cpu_env)->model;
8508 
8509     dprintf(fd, "version         : %02X\n", model->cpu_ver);
8510     dprintf(fd, "identification  : %06X\n", cpu_ident(n));
8511     dprintf(fd, "machine         : %04X\n", model->def->type);
8512 }
8513 
8514 static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
8515 {
8516     dprintf(fd, "\ncpu number      : %ld\n", n);
8517     show_cpu_ids(cpu_env, fd, n);
8518 }
8519 
8520 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8521 {
8522     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8523     int i;
8524 
8525     show_cpu_summary(cpu_env, fd);
8526     for (i = 0; i < num_cpus; i++) {
8527         show_cpuinfo(cpu_env, fd, i);
8528     }
8529     return 0;
8530 }
8531 #endif
8532 
8533 #if defined(TARGET_M68K)
8534 static int open_hardware(CPUArchState *cpu_env, int fd)
8535 {
8536     dprintf(fd, "Model:\t\tqemu-m68k\n");
8537     return 0;
8538 }
8539 #endif
8540 
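/*
 * openat() on behalf of the guest.  A small table of "fake" /proc files is
 * consulted first: for those entries the contents are synthesized into a
 * memfd (or, if memfd_create() is unavailable, an unlinked temporary file)
 * by the matching fill callback, and that descriptor is returned instead of
 * opening the host's own /proc file.  /proc/self/exe is redirected to the
 * guest's exec_path.  Everything else goes to the host openat(), optionally
 * through the safe_ wrapper.
 */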
8541 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8542                     int flags, mode_t mode, bool safe)
8543 {
8544     struct fake_open {
8545         const char *filename;
8546         int (*fill)(CPUArchState *cpu_env, int fd);
8547         int (*cmp)(const char *s1, const char *s2);
8548     };
8549     const struct fake_open *fake_open;
8550     static const struct fake_open fakes[] = {
8551         { "maps", open_self_maps, is_proc_myself },
8552         { "smaps", open_self_smaps, is_proc_myself },
8553         { "stat", open_self_stat, is_proc_myself },
8554         { "auxv", open_self_auxv, is_proc_myself },
8555         { "cmdline", open_self_cmdline, is_proc_myself },
8556 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8557         { "/proc/net/route", open_net_route, is_proc },
8558 #endif
8559 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
8560     defined(TARGET_RISCV) || defined(TARGET_S390X)
8561         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8562 #endif
8563 #if defined(TARGET_M68K)
8564         { "/proc/hardware", open_hardware, is_proc },
8565 #endif
8566         { NULL, NULL, NULL }
8567     };
8568 
8569     if (is_proc_myself(pathname, "exe")) {
8570         if (safe) {
8571             return safe_openat(dirfd, exec_path, flags, mode);
8572         } else {
8573             return openat(dirfd, exec_path, flags, mode);
8574         }
8575     }
8576 
8577     for (fake_open = fakes; fake_open->filename; fake_open++) {
8578         if (fake_open->cmp(pathname, fake_open->filename)) {
8579             break;
8580         }
8581     }
8582 
8583     if (fake_open->filename) {
8584         const char *tmpdir;
8585         char filename[PATH_MAX];
8586         int fd, r;
8587 
8588         fd = memfd_create("qemu-open", 0);
8589         if (fd < 0) {
8590             if (errno != ENOSYS) {
8591                 return fd;
8592             }
8593             /* fall back to a temporary file to hold the synthesized contents */
8594             tmpdir = getenv("TMPDIR");
8595             if (!tmpdir)
8596                 tmpdir = "/tmp";
8597             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8598             fd = mkstemp(filename);
8599             if (fd < 0) {
8600                 return fd;
8601             }
8602             unlink(filename);
8603         }
8604 
8605         if ((r = fake_open->fill(cpu_env, fd))) {
8606             int e = errno;
8607             close(fd);
8608             errno = e;
8609             return r;
8610         }
8611         lseek(fd, 0, SEEK_SET);
8612 
8613         return fd;
8614     }
8615 
8616     if (safe) {
8617         return safe_openat(dirfd, path(pathname), flags, mode);
8618     } else {
8619         return openat(dirfd, path(pathname), flags, mode);
8620     }
8621 }
8622 
8623 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8624 {
8625     ssize_t ret;
8626 
8627     if (!pathname || !buf) {
8628         errno = EFAULT;
8629         return -1;
8630     }
8631 
8632     if (!bufsiz) {
8633         /* Short circuit this for the magic exe check. */
8634         errno = EINVAL;
8635         return -1;
8636     }
8637 
8638     if (is_proc_myself((const char *)pathname, "exe")) {
8639         /*
8640          * Don't worry about sign mismatch as earlier mapping
8641          * logic would have thrown a bad address error.
8642          */
8643         ret = MIN(strlen(exec_path), bufsiz);
8644         /* We cannot NUL terminate the string. */
8645         memcpy(buf, exec_path, ret);
8646     } else {
8647         ret = readlink(path(pathname), buf, bufsiz);
8648     }
8649 
8650     return ret;
8651 }
8652 
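/*
 * execve()/execveat() for the guest: count and lock the argv/envp string
 * vectors from guest memory, redirect /proc/self/exe to exec_path, and issue
 * the host call through the safe_ wrappers (see the comment further down for
 * why even execve needs that treatment).  All locked strings are released on
 * both the success and the error paths.
 */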
8653 static int do_execv(CPUArchState *cpu_env, int dirfd,
8654                     abi_long pathname, abi_long guest_argp,
8655                     abi_long guest_envp, int flags, bool is_execveat)
8656 {
8657     int ret;
8658     char **argp, **envp;
8659     int argc, envc;
8660     abi_ulong gp;
8661     abi_ulong addr;
8662     char **q;
8663     void *p;
8664 
8665     argc = 0;
8666 
8667     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8668         if (get_user_ual(addr, gp)) {
8669             return -TARGET_EFAULT;
8670         }
8671         if (!addr) {
8672             break;
8673         }
8674         argc++;
8675     }
8676     envc = 0;
8677     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8678         if (get_user_ual(addr, gp)) {
8679             return -TARGET_EFAULT;
8680         }
8681         if (!addr) {
8682             break;
8683         }
8684         envc++;
8685     }
8686 
8687     argp = g_new0(char *, argc + 1);
8688     envp = g_new0(char *, envc + 1);
8689 
8690     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8691         if (get_user_ual(addr, gp)) {
8692             goto execve_efault;
8693         }
8694         if (!addr) {
8695             break;
8696         }
8697         *q = lock_user_string(addr);
8698         if (!*q) {
8699             goto execve_efault;
8700         }
8701     }
8702     *q = NULL;
8703 
8704     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8705         if (get_user_ual(addr, gp)) {
8706             goto execve_efault;
8707         }
8708         if (!addr) {
8709             break;
8710         }
8711         *q = lock_user_string(addr);
8712         if (!*q) {
8713             goto execve_efault;
8714         }
8715     }
8716     *q = NULL;
8717 
8718     /*
8719      * Although execve() is not an interruptible syscall it is
8720      * a special case where we must use the safe_syscall wrapper:
8721      * if we allow a signal to happen before we make the host
8722      * syscall then we will 'lose' it, because at the point of
8723      * execve the process leaves QEMU's control. So we use the
8724      * safe syscall wrapper to ensure that we either take the
8725      * signal as a guest signal, or else it does not happen
8726      * before the execve completes and makes it the other
8727      * program's problem.
8728      */
8729     p = lock_user_string(pathname);
8730     if (!p) {
8731         goto execve_efault;
8732     }
8733 
8734     const char *exe = p;
8735     if (is_proc_myself(p, "exe")) {
8736         exe = exec_path;
8737     }
8738     ret = is_execveat
8739         ? safe_execveat(dirfd, exe, argp, envp, flags)
8740         : safe_execve(exe, argp, envp);
8741     ret = get_errno(ret);
8742 
8743     unlock_user(p, pathname, 0);
8744 
8745     goto execve_end;
8746 
8747 execve_efault:
8748     ret = -TARGET_EFAULT;
8749 
8750 execve_end:
8751     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8752         if (get_user_ual(addr, gp) || !addr) {
8753             break;
8754         }
8755         unlock_user(*q, addr, 0);
8756     }
8757     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8758         if (get_user_ual(addr, gp) || !addr) {
8759             break;
8760         }
8761         unlock_user(*q, addr, 0);
8762     }
8763 
8764     g_free(argp);
8765     g_free(envp);
8766     return ret;
8767 }
8768 
8769 #define TIMER_MAGIC 0x0caf0000
8770 #define TIMER_MAGIC_MASK 0xffff0000
8771 
8772 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
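/* For example (illustrative values): 0x0caf0003 -> index 3; an argument whose
   high 16 bits are not TIMER_MAGIC, or whose index is out of range, yields
   -TARGET_EINVAL.  */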
8773 static target_timer_t get_timer_id(abi_long arg)
8774 {
8775     target_timer_t timerid = arg;
8776 
8777     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8778         return -TARGET_EINVAL;
8779     }
8780 
8781     timerid &= 0xffff;
8782 
8783     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8784         return -TARGET_EINVAL;
8785     }
8786 
8787     return timerid;
8788 }
8789 
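/*
 * Convert CPU affinity masks between the guest's abi_ulong words and the
 * host's unsigned long words, bit by bit, so that differing word sizes and
 * byte orders are handled; __get_user/__put_user take care of byteswapping
 * the individual words.  host_to_target_cpu_mask() below is the inverse.
 */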
8790 static int target_to_host_cpu_mask(unsigned long *host_mask,
8791                                    size_t host_size,
8792                                    abi_ulong target_addr,
8793                                    size_t target_size)
8794 {
8795     unsigned target_bits = sizeof(abi_ulong) * 8;
8796     unsigned host_bits = sizeof(*host_mask) * 8;
8797     abi_ulong *target_mask;
8798     unsigned i, j;
8799 
8800     assert(host_size >= target_size);
8801 
8802     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8803     if (!target_mask) {
8804         return -TARGET_EFAULT;
8805     }
8806     memset(host_mask, 0, host_size);
8807 
8808     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8809         unsigned bit = i * target_bits;
8810         abi_ulong val;
8811 
8812         __get_user(val, &target_mask[i]);
8813         for (j = 0; j < target_bits; j++, bit++) {
8814             if (val & (1UL << j)) {
8815                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8816             }
8817         }
8818     }
8819 
8820     unlock_user(target_mask, target_addr, 0);
8821     return 0;
8822 }
8823 
8824 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8825                                    size_t host_size,
8826                                    abi_ulong target_addr,
8827                                    size_t target_size)
8828 {
8829     unsigned target_bits = sizeof(abi_ulong) * 8;
8830     unsigned host_bits = sizeof(*host_mask) * 8;
8831     abi_ulong *target_mask;
8832     unsigned i, j;
8833 
8834     assert(host_size >= target_size);
8835 
8836     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8837     if (!target_mask) {
8838         return -TARGET_EFAULT;
8839     }
8840 
8841     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8842         unsigned bit = i * target_bits;
8843         abi_ulong val = 0;
8844 
8845         for (j = 0; j < target_bits; j++, bit++) {
8846             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8847                 val |= 1UL << j;
8848             }
8849         }
8850         __put_user(val, &target_mask[i]);
8851     }
8852 
8853     unlock_user(target_mask, target_addr, target_size);
8854     return 0;
8855 }
8856 
8857 #ifdef TARGET_NR_getdents
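/*
 * getdents emulation: read host dirents into a bounce buffer, then convert
 * record by record into the target layout (do_getdents64() below does the
 * same for the 64-bit variant).  If the target records pack less densely and
 * the guest buffer fills up early, the directory offset is rewound to the
 * first unconsumed record so nothing is lost.
 */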
8858 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8859 {
8860     g_autofree void *hdirp = NULL;
8861     void *tdirp;
8862     int hlen, hoff, toff;
8863     int hreclen, treclen;
8864     off64_t prev_diroff = 0;
8865 
8866     hdirp = g_try_malloc(count);
8867     if (!hdirp) {
8868         return -TARGET_ENOMEM;
8869     }
8870 
8871 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8872     hlen = sys_getdents(dirfd, hdirp, count);
8873 #else
8874     hlen = sys_getdents64(dirfd, hdirp, count);
8875 #endif
8876 
8877     hlen = get_errno(hlen);
8878     if (is_error(hlen)) {
8879         return hlen;
8880     }
8881 
8882     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8883     if (!tdirp) {
8884         return -TARGET_EFAULT;
8885     }
8886 
8887     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8888 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8889         struct linux_dirent *hde = hdirp + hoff;
8890 #else
8891         struct linux_dirent64 *hde = hdirp + hoff;
8892 #endif
8893         struct target_dirent *tde = tdirp + toff;
8894         int namelen;
8895         uint8_t type;
8896 
8897         namelen = strlen(hde->d_name);
8898         hreclen = hde->d_reclen;
8899         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8900         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8901 
8902         if (toff + treclen > count) {
8903             /*
8904              * If the host struct is smaller than the target struct, or
8905              * requires less alignment and thus packs into less space,
8906              * then the host can return more entries than we can pass
8907              * on to the guest.
8908              */
8909             if (toff == 0) {
8910                 toff = -TARGET_EINVAL; /* result buffer is too small */
8911                 break;
8912             }
8913             /*
8914              * Return what we have, resetting the file pointer to the
8915              * location of the first record not returned.
8916              */
8917             lseek64(dirfd, prev_diroff, SEEK_SET);
8918             break;
8919         }
8920 
8921         prev_diroff = hde->d_off;
8922         tde->d_ino = tswapal(hde->d_ino);
8923         tde->d_off = tswapal(hde->d_off);
8924         tde->d_reclen = tswap16(treclen);
8925         memcpy(tde->d_name, hde->d_name, namelen + 1);
8926 
8927         /*
8928          * The getdents type is in what was formerly a padding byte at the
8929          * end of the structure.
8930          */
8931 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8932         type = *((uint8_t *)hde + hreclen - 1);
8933 #else
8934         type = hde->d_type;
8935 #endif
8936         *((uint8_t *)tde + treclen - 1) = type;
8937     }
8938 
8939     unlock_user(tdirp, arg2, toff);
8940     return toff;
8941 }
8942 #endif /* TARGET_NR_getdents */
8943 
8944 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8945 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8946 {
8947     g_autofree void *hdirp = NULL;
8948     void *tdirp;
8949     int hlen, hoff, toff;
8950     int hreclen, treclen;
8951     off64_t prev_diroff = 0;
8952 
8953     hdirp = g_try_malloc(count);
8954     if (!hdirp) {
8955         return -TARGET_ENOMEM;
8956     }
8957 
8958     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8959     if (is_error(hlen)) {
8960         return hlen;
8961     }
8962 
8963     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8964     if (!tdirp) {
8965         return -TARGET_EFAULT;
8966     }
8967 
8968     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8969         struct linux_dirent64 *hde = hdirp + hoff;
8970         struct target_dirent64 *tde = tdirp + toff;
8971         int namelen;
8972 
8973         namelen = strlen(hde->d_name) + 1;
8974         hreclen = hde->d_reclen;
8975         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8976         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8977 
8978         if (toff + treclen > count) {
8979             /*
8980              * If the host struct is smaller than the target struct, or
8981              * requires less alignment and thus packs into less space,
8982              * then the host can return more entries than we can pass
8983              * on to the guest.
8984              */
8985             if (toff == 0) {
8986                 toff = -TARGET_EINVAL; /* result buffer is too small */
8987                 break;
8988             }
8989             /*
8990              * Return what we have, resetting the file pointer to the
8991              * location of the first record not returned.
8992              */
8993             lseek64(dirfd, prev_diroff, SEEK_SET);
8994             break;
8995         }
8996 
8997         prev_diroff = hde->d_off;
8998         tde->d_ino = tswap64(hde->d_ino);
8999         tde->d_off = tswap64(hde->d_off);
9000         tde->d_reclen = tswap16(treclen);
9001         tde->d_type = hde->d_type;
9002         memcpy(tde->d_name, hde->d_name, namelen);
9003     }
9004 
9005     unlock_user(tdirp, arg2, toff);
9006     return toff;
9007 }
9008 #endif /* TARGET_NR_getdents64 */
9009 
9010 #if defined(TARGET_NR_riscv_hwprobe)
9011 
9012 #define RISCV_HWPROBE_KEY_MVENDORID     0
9013 #define RISCV_HWPROBE_KEY_MARCHID       1
9014 #define RISCV_HWPROBE_KEY_MIMPID        2
9015 
9016 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
9017 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
9018 
9019 #define RISCV_HWPROBE_KEY_IMA_EXT_0     4
9020 #define     RISCV_HWPROBE_IMA_FD       (1 << 0)
9021 #define     RISCV_HWPROBE_IMA_C        (1 << 1)
9022 
9023 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
9024 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
9025 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
9026 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
9027 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
9028 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9029 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
9030 
9031 struct riscv_hwprobe {
9032     abi_llong  key;
9033     abi_ullong value;
9034 };
9035 
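/*
 * riscv_hwprobe(2) emulation: the guest passes an array of key/value pairs;
 * for each known key the value is filled in from the CPU configuration and
 * extension set, while unknown keys get their key field set to -1, mirroring
 * the kernel's behaviour.
 */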
9036 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9037                                     struct riscv_hwprobe *pair,
9038                                     size_t pair_count)
9039 {
9040     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9041 
9042     for (; pair_count > 0; pair_count--, pair++) {
9043         abi_llong key;
9044         abi_ullong value;
9045         __put_user(0, &pair->value);
9046         __get_user(key, &pair->key);
9047         switch (key) {
9048         case RISCV_HWPROBE_KEY_MVENDORID:
9049             __put_user(cfg->mvendorid, &pair->value);
9050             break;
9051         case RISCV_HWPROBE_KEY_MARCHID:
9052             __put_user(cfg->marchid, &pair->value);
9053             break;
9054         case RISCV_HWPROBE_KEY_MIMPID:
9055             __put_user(cfg->mimpid, &pair->value);
9056             break;
9057         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9058             value = riscv_has_ext(env, RVI) &&
9059                     riscv_has_ext(env, RVM) &&
9060                     riscv_has_ext(env, RVA) ?
9061                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9062             __put_user(value, &pair->value);
9063             break;
9064         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9065             value = riscv_has_ext(env, RVF) &&
9066                     riscv_has_ext(env, RVD) ?
9067                     RISCV_HWPROBE_IMA_FD : 0;
9068             value |= riscv_has_ext(env, RVC) ?
9069                      RISCV_HWPROBE_IMA_C : pair->value;
9070             __put_user(value, &pair->value);
9071             break;
9072         case RISCV_HWPROBE_KEY_CPUPERF_0:
9073             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9074             break;
9075         default:
9076             __put_user(-1, &pair->key);
9077             break;
9078         }
9079     }
9080 }
9081 
9082 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9083 {
9084     int ret, i, tmp;
9085     size_t host_mask_size, target_mask_size;
9086     unsigned long *host_mask;
9087 
9088     /*
9089      * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
9090      * arg3 contains the CPU count.
9091      */
9092     tmp = (8 * sizeof(abi_ulong));
9093     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9094     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9095                      ~(sizeof(*host_mask) - 1);
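    /*
     * Worked example (illustrative): with a 4-byte abi_ulong and arg3 == 48
     * CPUs, tmp is 32, so target_mask_size = ((48 + 31) / 32) * 4 = 8 bytes,
     * and host_mask_size rounds that up to a multiple of sizeof(unsigned
     * long) -- still 8 on a 64-bit host.
     */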
9096 
9097     host_mask = alloca(host_mask_size);
9098 
9099     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9100                                   arg4, target_mask_size);
9101     if (ret != 0) {
9102         return ret;
9103     }
9104 
9105     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9106         if (host_mask[i] != 0) {
9107             return 0;
9108         }
9109     }
9110     return -TARGET_EINVAL;
9111 }
9112 
9113 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9114                                  abi_long arg2, abi_long arg3,
9115                                  abi_long arg4, abi_long arg5)
9116 {
9117     int ret;
9118     struct riscv_hwprobe *host_pairs;
9119 
9120     /* flags must be 0 */
9121     if (arg5 != 0) {
9122         return -TARGET_EINVAL;
9123     }
9124 
9125     /* check cpu_set */
9126     if (arg3 != 0) {
9127         ret = cpu_set_valid(arg3, arg4);
9128         if (ret != 0) {
9129             return ret;
9130         }
9131     } else if (arg4 != 0) {
9132         return -TARGET_EINVAL;
9133     }
9134 
9135     /* no pairs */
9136     if (arg2 == 0) {
9137         return 0;
9138     }
9139 
9140     host_pairs = lock_user(VERIFY_WRITE, arg1,
9141                            sizeof(*host_pairs) * (size_t)arg2, 0);
9142     if (host_pairs == NULL) {
9143         return -TARGET_EFAULT;
9144     }
9145     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9146     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9147     return 0;
9148 }
9149 #endif /* TARGET_NR_riscv_hwprobe */
9150 
9151 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9152 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9153 #endif
9154 
9155 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9156 #define __NR_sys_open_tree __NR_open_tree
9157 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9158           unsigned int, __flags)
9159 #endif
9160 
9161 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9162 #define __NR_sys_move_mount __NR_move_mount
9163 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9164            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9165 #endif
9166 
9167 /* This is an internal helper for do_syscall so that it is easier
9168  * to have a single return point, at which actions such as logging
9169  * of syscall results can be performed.
9170  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9171  */
9172 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9173                             abi_long arg2, abi_long arg3, abi_long arg4,
9174                             abi_long arg5, abi_long arg6, abi_long arg7,
9175                             abi_long arg8)
9176 {
9177     CPUState *cpu = env_cpu(cpu_env);
9178     abi_long ret;
9179 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9180     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9181     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9182     || defined(TARGET_NR_statx)
9183     struct stat st;
9184 #endif
9185 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9186     || defined(TARGET_NR_fstatfs)
9187     struct statfs stfs;
9188 #endif
9189     void *p;
9190 
9191     switch(num) {
9192     case TARGET_NR_exit:
9193         /* In old applications this may be used to implement _exit(2).
9194            However, in threaded applications it is used for thread termination,
9195            and _exit_group is used for application termination.
9196            Do thread termination if we have more than one thread.  */
9197 
9198         if (block_signals()) {
9199             return -QEMU_ERESTARTSYS;
9200         }
9201 
9202         pthread_mutex_lock(&clone_lock);
9203 
9204         if (CPU_NEXT(first_cpu)) {
9205             TaskState *ts = cpu->opaque;
9206 
9207             if (ts->child_tidptr) {
9208                 put_user_u32(0, ts->child_tidptr);
9209                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9210                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9211             }
9212 
9213             object_unparent(OBJECT(cpu));
9214             object_unref(OBJECT(cpu));
9215             /*
9216              * At this point the CPU should be unrealized and removed
9217              * from cpu lists. We can clean-up the rest of the thread
9218              * data without the lock held.
9219              */
9220 
9221             pthread_mutex_unlock(&clone_lock);
9222 
9223             thread_cpu = NULL;
9224             g_free(ts);
9225             rcu_unregister_thread();
9226             pthread_exit(NULL);
9227         }
9228 
9229         pthread_mutex_unlock(&clone_lock);
9230         preexit_cleanup(cpu_env, arg1);
9231         _exit(arg1);
9232         return 0; /* avoid warning */
9233     case TARGET_NR_read:
9234         if (arg2 == 0 && arg3 == 0) {
9235             return get_errno(safe_read(arg1, 0, 0));
9236         } else {
9237             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9238                 return -TARGET_EFAULT;
9239             ret = get_errno(safe_read(arg1, p, arg3));
9240             if (ret >= 0 &&
9241                 fd_trans_host_to_target_data(arg1)) {
9242                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9243             }
9244             unlock_user(p, arg2, ret);
9245         }
9246         return ret;
9247     case TARGET_NR_write:
9248         if (arg2 == 0 && arg3 == 0) {
9249             return get_errno(safe_write(arg1, 0, 0));
9250         }
9251         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9252             return -TARGET_EFAULT;
9253         if (fd_trans_target_to_host_data(arg1)) {
9254             void *copy = g_malloc(arg3);
9255             memcpy(copy, p, arg3);
9256             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9257             if (ret >= 0) {
9258                 ret = get_errno(safe_write(arg1, copy, ret));
9259             }
9260             g_free(copy);
9261         } else {
9262             ret = get_errno(safe_write(arg1, p, arg3));
9263         }
9264         unlock_user(p, arg2, 0);
9265         return ret;
9266 
9267 #ifdef TARGET_NR_open
9268     case TARGET_NR_open:
9269         if (!(p = lock_user_string(arg1)))
9270             return -TARGET_EFAULT;
9271         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9272                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9273                                   arg3, true));
9274         fd_trans_unregister(ret);
9275         unlock_user(p, arg1, 0);
9276         return ret;
9277 #endif
9278     case TARGET_NR_openat:
9279         if (!(p = lock_user_string(arg2)))
9280             return -TARGET_EFAULT;
9281         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9282                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9283                                   arg4, true));
9284         fd_trans_unregister(ret);
9285         unlock_user(p, arg2, 0);
9286         return ret;
9287 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9288     case TARGET_NR_name_to_handle_at:
9289         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9290         return ret;
9291 #endif
9292 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9293     case TARGET_NR_open_by_handle_at:
9294         ret = do_open_by_handle_at(arg1, arg2, arg3);
9295         fd_trans_unregister(ret);
9296         return ret;
9297 #endif
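    /*
     * The pidfd_* syscalls below are thin pass-throughs; each case is
     * compiled only when both the host headers (__NR_*) and the target
     * (TARGET_NR_*) define the corresponding syscall number.
     */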
9298 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9299     case TARGET_NR_pidfd_open:
9300         return get_errno(pidfd_open(arg1, arg2));
9301 #endif
9302 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9303     case TARGET_NR_pidfd_send_signal:
9304         {
9305             siginfo_t uinfo, *puinfo;
9306 
9307             if (arg3) {
9308                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9309                 if (!p) {
9310                     return -TARGET_EFAULT;
9311                 }
9312                 target_to_host_siginfo(&uinfo, p);
9313                 unlock_user(p, arg3, 0);
9314                 puinfo = &uinfo;
9315             } else {
9316                 puinfo = NULL;
9317             }
9318             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9319                                               puinfo, arg4));
9320         }
9321         return ret;
9322 #endif
9323 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9324     case TARGET_NR_pidfd_getfd:
9325         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9326 #endif
9327     case TARGET_NR_close:
9328         fd_trans_unregister(arg1);
9329         return get_errno(close(arg1));
9330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9331     case TARGET_NR_close_range:
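        /*
         * On success, drop the fd translators for descriptors that were
         * really closed.  With CLOSE_RANGE_CLOEXEC the descriptors stay
         * open (they are only marked close-on-exec), so keep them.
         */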
9332         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9333         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9334             abi_long fd, maxfd;
9335             maxfd = MIN(arg2, target_fd_max);
9336             for (fd = arg1; fd < maxfd; fd++) {
9337                 fd_trans_unregister(fd);
9338             }
9339         }
9340         return ret;
9341 #endif
9342 
9343     case TARGET_NR_brk:
9344         return do_brk(arg1);
9345 #ifdef TARGET_NR_fork
9346     case TARGET_NR_fork:
9347         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9348 #endif
9349 #ifdef TARGET_NR_waitpid
9350     case TARGET_NR_waitpid:
9351         {
9352             int status;
9353             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9354             if (!is_error(ret) && arg2 && ret
9355                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9356                 return -TARGET_EFAULT;
9357         }
9358         return ret;
9359 #endif
9360 #ifdef TARGET_NR_waitid
9361     case TARGET_NR_waitid:
9362         {
9363             siginfo_t info;
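            /*
             * si_pid is cleared first so that siginfo is only copied
             * back to the guest when waitid() actually reported a child
             * (e.g. WNOHANG can return 0 without filling anything in).
             */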
9364             info.si_pid = 0;
9365             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9366             if (!is_error(ret) && arg3 && info.si_pid != 0) {
9367                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9368                     return -TARGET_EFAULT;
9369                 host_to_target_siginfo(p, &info);
9370                 unlock_user(p, arg3, sizeof(target_siginfo_t));
9371             }
9372         }
9373         return ret;
9374 #endif
9375 #ifdef TARGET_NR_creat /* not on alpha */
9376     case TARGET_NR_creat:
9377         if (!(p = lock_user_string(arg1)))
9378             return -TARGET_EFAULT;
9379         ret = get_errno(creat(p, arg2));
9380         fd_trans_unregister(ret);
9381         unlock_user(p, arg1, 0);
9382         return ret;
9383 #endif
9384 #ifdef TARGET_NR_link
9385     case TARGET_NR_link:
9386         {
9387             void * p2;
9388             p = lock_user_string(arg1);
9389             p2 = lock_user_string(arg2);
9390             if (!p || !p2)
9391                 ret = -TARGET_EFAULT;
9392             else
9393                 ret = get_errno(link(p, p2));
9394             unlock_user(p2, arg2, 0);
9395             unlock_user(p, arg1, 0);
9396         }
9397         return ret;
9398 #endif
9399 #if defined(TARGET_NR_linkat)
9400     case TARGET_NR_linkat:
9401         {
9402             void * p2 = NULL;
9403             if (!arg2 || !arg4)
9404                 return -TARGET_EFAULT;
9405             p  = lock_user_string(arg2);
9406             p2 = lock_user_string(arg4);
9407             if (!p || !p2)
9408                 ret = -TARGET_EFAULT;
9409             else
9410                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9411             unlock_user(p, arg2, 0);
9412             unlock_user(p2, arg4, 0);
9413         }
9414         return ret;
9415 #endif
9416 #ifdef TARGET_NR_unlink
9417     case TARGET_NR_unlink:
9418         if (!(p = lock_user_string(arg1)))
9419             return -TARGET_EFAULT;
9420         ret = get_errno(unlink(p));
9421         unlock_user(p, arg1, 0);
9422         return ret;
9423 #endif
9424 #if defined(TARGET_NR_unlinkat)
9425     case TARGET_NR_unlinkat:
9426         if (!(p = lock_user_string(arg2)))
9427             return -TARGET_EFAULT;
9428         ret = get_errno(unlinkat(arg1, p, arg3));
9429         unlock_user(p, arg2, 0);
9430         return ret;
9431 #endif
9432     case TARGET_NR_execveat:
9433         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9434     case TARGET_NR_execve:
9435         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9436     case TARGET_NR_chdir:
9437         if (!(p = lock_user_string(arg1)))
9438             return -TARGET_EFAULT;
9439         ret = get_errno(chdir(p));
9440         unlock_user(p, arg1, 0);
9441         return ret;
9442 #ifdef TARGET_NR_time
9443     case TARGET_NR_time:
9444         {
9445             time_t host_time;
9446             ret = get_errno(time(&host_time));
9447             if (!is_error(ret)
9448                 && arg1
9449                 && put_user_sal(host_time, arg1))
9450                 return -TARGET_EFAULT;
9451         }
9452         return ret;
9453 #endif
9454 #ifdef TARGET_NR_mknod
9455     case TARGET_NR_mknod:
9456         if (!(p = lock_user_string(arg1)))
9457             return -TARGET_EFAULT;
9458         ret = get_errno(mknod(p, arg2, arg3));
9459         unlock_user(p, arg1, 0);
9460         return ret;
9461 #endif
9462 #if defined(TARGET_NR_mknodat)
9463     case TARGET_NR_mknodat:
9464         if (!(p = lock_user_string(arg2)))
9465             return -TARGET_EFAULT;
9466         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9467         unlock_user(p, arg2, 0);
9468         return ret;
9469 #endif
9470 #ifdef TARGET_NR_chmod
9471     case TARGET_NR_chmod:
9472         if (!(p = lock_user_string(arg1)))
9473             return -TARGET_EFAULT;
9474         ret = get_errno(chmod(p, arg2));
9475         unlock_user(p, arg1, 0);
9476         return ret;
9477 #endif
9478 #ifdef TARGET_NR_lseek
9479     case TARGET_NR_lseek:
9480         return get_errno(lseek(arg1, arg2, arg3));
9481 #endif
9482 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9483     /* Alpha specific */
9484     case TARGET_NR_getxpid:
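        /* getxpid also returns the parent pid in the a4 register. */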
9485         cpu_env->ir[IR_A4] = getppid();
9486         return get_errno(getpid());
9487 #endif
9488 #ifdef TARGET_NR_getpid
9489     case TARGET_NR_getpid:
9490         return get_errno(getpid());
9491 #endif
9492     case TARGET_NR_mount:
9493         {
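            /*
             * Only the target path (arg2) is mandatory; the source and
             * the filesystem type may be NULL (a remount, for example,
             * needs neither).
             */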
9494             /* need to look at the data field */
9495             void *p2, *p3;
9496 
9497             if (arg1) {
9498                 p = lock_user_string(arg1);
9499                 if (!p) {
9500                     return -TARGET_EFAULT;
9501                 }
9502             } else {
9503                 p = NULL;
9504             }
9505 
9506             p2 = lock_user_string(arg2);
9507             if (!p2) {
9508                 if (arg1) {
9509                     unlock_user(p, arg1, 0);
9510                 }
9511                 return -TARGET_EFAULT;
9512             }
9513 
9514             if (arg3) {
9515                 p3 = lock_user_string(arg3);
9516                 if (!p3) {
9517                     if (arg1) {
9518                         unlock_user(p, arg1, 0);
9519                     }
9520                     unlock_user(p2, arg2, 0);
9521                     return -TARGET_EFAULT;
9522                 }
9523             } else {
9524                 p3 = NULL;
9525             }
9526 
9527             /* FIXME - arg5 should be locked, but it isn't clear how to
9528              * do that since it's not guaranteed to be a NULL-terminated
9529              * string.
9530              */
9531             if (!arg5) {
9532                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9533             } else {
9534                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9535             }
9536             ret = get_errno(ret);
9537 
9538             if (arg1) {
9539                 unlock_user(p, arg1, 0);
9540             }
9541             unlock_user(p2, arg2, 0);
9542             if (arg3) {
9543                 unlock_user(p3, arg3, 0);
9544             }
9545         }
9546         return ret;
9547 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9548 #if defined(TARGET_NR_umount)
9549     case TARGET_NR_umount:
9550 #endif
9551 #if defined(TARGET_NR_oldumount)
9552     case TARGET_NR_oldumount:
9553 #endif
9554         if (!(p = lock_user_string(arg1)))
9555             return -TARGET_EFAULT;
9556         ret = get_errno(umount(p));
9557         unlock_user(p, arg1, 0);
9558         return ret;
9559 #endif
9560 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9561     case TARGET_NR_move_mount:
9562         {
9563             void *p2, *p4;
9564 
9565             if (!arg2 || !arg4) {
9566                 return -TARGET_EFAULT;
9567             }
9568 
9569             p2 = lock_user_string(arg2);
9570             if (!p2) {
9571                 return -TARGET_EFAULT;
9572             }
9573 
9574             p4 = lock_user_string(arg4);
9575             if (!p4) {
9576                 unlock_user(p2, arg2, 0);
9577                 return -TARGET_EFAULT;
9578             }
9579             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9580 
9581             unlock_user(p2, arg2, 0);
9582             unlock_user(p4, arg4, 0);
9583 
9584             return ret;
9585         }
9586 #endif
9587 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9588     case TARGET_NR_open_tree:
9589         {
9590             void *p2;
9591             int host_flags;
9592 
9593             if (!arg2) {
9594                 return -TARGET_EFAULT;
9595             }
9596 
9597             p2 = lock_user_string(arg2);
9598             if (!p2) {
9599                 return -TARGET_EFAULT;
9600             }
9601 
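            /*
             * O_CLOEXEC (which OPEN_TREE_CLOEXEC is defined from) has a
             * target-specific value, so translate that bit by hand and
             * pass the remaining flags through unchanged.
             */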
9602             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9603             if (arg3 & TARGET_O_CLOEXEC) {
9604                 host_flags |= O_CLOEXEC;
9605             }
9606 
9607             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9608 
9609             unlock_user(p2, arg2, 0);
9610 
9611             return ret;
9612         }
9613 #endif
9614 #ifdef TARGET_NR_stime /* not on alpha */
9615     case TARGET_NR_stime:
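        /*
         * Implemented with clock_settime(CLOCK_REALTIME), as the host
         * libc may not provide a stime() wrapper any more.
         */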
9616         {
9617             struct timespec ts;
9618             ts.tv_nsec = 0;
9619             if (get_user_sal(ts.tv_sec, arg1)) {
9620                 return -TARGET_EFAULT;
9621             }
9622             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9623         }
9624 #endif
9625 #ifdef TARGET_NR_alarm /* not on alpha */
9626     case TARGET_NR_alarm:
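        /*
         * alarm() never fails; its return value is the number of
         * seconds remaining on any previous alarm, so no errno handling
         * is needed.
         */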
9627         return alarm(arg1);
9628 #endif
9629 #ifdef TARGET_NR_pause /* not on alpha */
9630     case TARGET_NR_pause:
9631         if (!block_signals()) {
9632             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9633         }
9634         return -TARGET_EINTR;
9635 #endif
9636 #ifdef TARGET_NR_utime
9637     case TARGET_NR_utime:
9638         {
9639             struct utimbuf tbuf, *host_tbuf;
9640             struct target_utimbuf *target_tbuf;
9641             if (arg2) {
9642                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9643                     return -TARGET_EFAULT;
9644                 tbuf.actime = tswapal(target_tbuf->actime);
9645                 tbuf.modtime = tswapal(target_tbuf->modtime);
9646                 unlock_user_struct(target_tbuf, arg2, 0);
9647                 host_tbuf = &tbuf;
9648             } else {
9649                 host_tbuf = NULL;
9650             }
9651             if (!(p = lock_user_string(arg1)))
9652                 return -TARGET_EFAULT;
9653             ret = get_errno(utime(p, host_tbuf));
9654             unlock_user(p, arg1, 0);
9655         }
9656         return ret;
9657 #endif
9658 #ifdef TARGET_NR_utimes
9659     case TARGET_NR_utimes:
9660         {
9661             struct timeval *tvp, tv[2];
9662             if (arg2) {
9663                 if (copy_from_user_timeval(&tv[0], arg2)
9664                     || copy_from_user_timeval(&tv[1],
9665                                               arg2 + sizeof(struct target_timeval)))
9666                     return -TARGET_EFAULT;
9667                 tvp = tv;
9668             } else {
9669                 tvp = NULL;
9670             }
9671             if (!(p = lock_user_string(arg1)))
9672                 return -TARGET_EFAULT;
9673             ret = get_errno(utimes(p, tvp));
9674             unlock_user(p, arg1, 0);
9675         }
9676         return ret;
9677 #endif
9678 #if defined(TARGET_NR_futimesat)
9679     case TARGET_NR_futimesat:
9680         {
9681             struct timeval *tvp, tv[2];
9682             if (arg3) {
9683                 if (copy_from_user_timeval(&tv[0], arg3)
9684                     || copy_from_user_timeval(&tv[1],
9685                                               arg3 + sizeof(struct target_timeval)))
9686                     return -TARGET_EFAULT;
9687                 tvp = tv;
9688             } else {
9689                 tvp = NULL;
9690             }
9691             if (!(p = lock_user_string(arg2))) {
9692                 return -TARGET_EFAULT;
9693             }
9694             ret = get_errno(futimesat(arg1, path(p), tvp));
9695             unlock_user(p, arg2, 0);
9696         }
9697         return ret;
9698 #endif
9699 #ifdef TARGET_NR_access
9700     case TARGET_NR_access:
9701         if (!(p = lock_user_string(arg1))) {
9702             return -TARGET_EFAULT;
9703         }
9704         ret = get_errno(access(path(p), arg2));
9705         unlock_user(p, arg1, 0);
9706         return ret;
9707 #endif
9708 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9709     case TARGET_NR_faccessat:
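        /*
         * The original faccessat syscall has no flags argument; flags
         * arrived with faccessat2 (handled below), so pass 0 here.
         */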
9710         if (!(p = lock_user_string(arg2))) {
9711             return -TARGET_EFAULT;
9712         }
9713         ret = get_errno(faccessat(arg1, p, arg3, 0));
9714         unlock_user(p, arg2, 0);
9715         return ret;
9716 #endif
9717 #if defined(TARGET_NR_faccessat2)
9718     case TARGET_NR_faccessat2:
9719         if (!(p = lock_user_string(arg2))) {
9720             return -TARGET_EFAULT;
9721         }
9722         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9723         unlock_user(p, arg2, 0);
9724         return ret;
9725 #endif
9726 #ifdef TARGET_NR_nice /* not on alpha */
9727     case TARGET_NR_nice:
9728         return get_errno(nice(arg1));
9729 #endif
9730     case TARGET_NR_sync:
9731         sync();
9732         return 0;
9733 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9734     case TARGET_NR_syncfs:
9735         return get_errno(syncfs(arg1));
9736 #endif
9737     case TARGET_NR_kill:
9738         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9739 #ifdef TARGET_NR_rename
9740     case TARGET_NR_rename:
9741         {
9742             void *p2;
9743             p = lock_user_string(arg1);
9744             p2 = lock_user_string(arg2);
9745             if (!p || !p2)
9746                 ret = -TARGET_EFAULT;
9747             else
9748                 ret = get_errno(rename(p, p2));
9749             unlock_user(p2, arg2, 0);
9750             unlock_user(p, arg1, 0);
9751         }
9752         return ret;
9753 #endif
9754 #if defined(TARGET_NR_renameat)
9755     case TARGET_NR_renameat:
9756         {
9757             void *p2;
9758             p  = lock_user_string(arg2);
9759             p2 = lock_user_string(arg4);
9760             if (!p || !p2)
9761                 ret = -TARGET_EFAULT;
9762             else
9763                 ret = get_errno(renameat(arg1, p, arg3, p2));
9764             unlock_user(p2, arg4, 0);
9765             unlock_user(p, arg2, 0);
9766         }
9767         return ret;
9768 #endif
9769 #if defined(TARGET_NR_renameat2)
9770     case TARGET_NR_renameat2:
9771         {
9772             void *p2;
9773             p  = lock_user_string(arg2);
9774             p2 = lock_user_string(arg4);
9775             if (!p || !p2) {
9776                 ret = -TARGET_EFAULT;
9777             } else {
9778                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9779             }
9780             unlock_user(p2, arg4, 0);
9781             unlock_user(p, arg2, 0);
9782         }
9783         return ret;
9784 #endif
9785 #ifdef TARGET_NR_mkdir
9786     case TARGET_NR_mkdir:
9787         if (!(p = lock_user_string(arg1)))
9788             return -TARGET_EFAULT;
9789         ret = get_errno(mkdir(p, arg2));
9790         unlock_user(p, arg1, 0);
9791         return ret;
9792 #endif
9793 #if defined(TARGET_NR_mkdirat)
9794     case TARGET_NR_mkdirat:
9795         if (!(p = lock_user_string(arg2)))
9796             return -TARGET_EFAULT;
9797         ret = get_errno(mkdirat(arg1, p, arg3));
9798         unlock_user(p, arg2, 0);
9799         return ret;
9800 #endif
9801 #ifdef TARGET_NR_rmdir
9802     case TARGET_NR_rmdir:
9803         if (!(p = lock_user_string(arg1)))
9804             return -TARGET_EFAULT;
9805         ret = get_errno(rmdir(p));
9806         unlock_user(p, arg1, 0);
9807         return ret;
9808 #endif
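    /*
     * dup/dup2/dup3 also duplicate any fd translator attached to the
     * old descriptor, so translated fds keep working after duplication.
     */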
9809     case TARGET_NR_dup:
9810         ret = get_errno(dup(arg1));
9811         if (ret >= 0) {
9812             fd_trans_dup(arg1, ret);
9813         }
9814         return ret;
9815 #ifdef TARGET_NR_pipe
9816     case TARGET_NR_pipe:
9817         return do_pipe(cpu_env, arg1, 0, 0);
9818 #endif
9819 #ifdef TARGET_NR_pipe2
9820     case TARGET_NR_pipe2:
9821         return do_pipe(cpu_env, arg1,
9822                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9823 #endif
9824     case TARGET_NR_times:
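        /*
         * Both the four struct tms fields and the return value are
         * clock_t ticks and need host-to-target conversion.
         */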
9825         {
9826             struct target_tms *tmsp;
9827             struct tms tms;
9828             ret = get_errno(times(&tms));
9829             if (arg1) {
9830                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9831                 if (!tmsp)
9832                     return -TARGET_EFAULT;
9833                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9834                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9835                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9836                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9837             }
9838             if (!is_error(ret))
9839                 ret = host_to_target_clock_t(ret);
9840         }
9841         return ret;
9842     case TARGET_NR_acct:
9843         if (arg1 == 0) {
9844             ret = get_errno(acct(NULL));
9845         } else {
9846             if (!(p = lock_user_string(arg1))) {
9847                 return -TARGET_EFAULT;
9848             }
9849             ret = get_errno(acct(path(p)));
9850             unlock_user(p, arg1, 0);
9851         }
9852         return ret;
9853 #ifdef TARGET_NR_umount2
9854     case TARGET_NR_umount2:
9855         if (!(p = lock_user_string(arg1)))
9856             return -TARGET_EFAULT;
9857         ret = get_errno(umount2(p, arg2));
9858         unlock_user(p, arg1, 0);
9859         return ret;
9860 #endif
9861     case TARGET_NR_ioctl:
9862         return do_ioctl(arg1, arg2, arg3);
9863 #ifdef TARGET_NR_fcntl
9864     case TARGET_NR_fcntl:
9865         return do_fcntl(arg1, arg2, arg3);
9866 #endif
9867     case TARGET_NR_setpgid:
9868         return get_errno(setpgid(arg1, arg2));
9869     case TARGET_NR_umask:
9870         return get_errno(umask(arg1));
9871     case TARGET_NR_chroot:
9872         if (!(p = lock_user_string(arg1)))
9873             return -TARGET_EFAULT;
9874         ret = get_errno(chroot(p));
9875         unlock_user(p, arg1, 0);
9876         return ret;
9877 #ifdef TARGET_NR_dup2
9878     case TARGET_NR_dup2:
9879         ret = get_errno(dup2(arg1, arg2));
9880         if (ret >= 0) {
9881             fd_trans_dup(arg1, arg2);
9882         }
9883         return ret;
9884 #endif
9885 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9886     case TARGET_NR_dup3:
9887     {
9888         int host_flags;
9889 
9890         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9891             return -TARGET_EINVAL;
9892         }
9893         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9894         ret = get_errno(dup3(arg1, arg2, host_flags));
9895         if (ret >= 0) {
9896             fd_trans_dup(arg1, arg2);
9897         }
9898         return ret;
9899     }
9900 #endif
9901 #ifdef TARGET_NR_getppid /* not on alpha */
9902     case TARGET_NR_getppid:
9903         return get_errno(getppid());
9904 #endif
9905 #ifdef TARGET_NR_getpgrp
9906     case TARGET_NR_getpgrp:
9907         return get_errno(getpgrp());
9908 #endif
9909     case TARGET_NR_setsid:
9910         return get_errno(setsid());
9911 #ifdef TARGET_NR_sigaction
9912     case TARGET_NR_sigaction:
9913         {
9914 #if defined(TARGET_MIPS)
9915             struct target_sigaction act, oact, *pact, *old_act;
9916 
9917             if (arg2) {
9918                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9919                     return -TARGET_EFAULT;
9920                 act._sa_handler = old_act->_sa_handler;
9921                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9922                 act.sa_flags = old_act->sa_flags;
9923                 unlock_user_struct(old_act, arg2, 0);
9924                 pact = &act;
9925             } else {
9926                 pact = NULL;
9927             }
9928 
9929             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9930 
9931             if (!is_error(ret) && arg3) {
9932                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9933                     return -TARGET_EFAULT;
9934                 old_act->_sa_handler = oact._sa_handler;
9935                 old_act->sa_flags = oact.sa_flags;
9936                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9937                 old_act->sa_mask.sig[1] = 0;
9938                 old_act->sa_mask.sig[2] = 0;
9939                 old_act->sa_mask.sig[3] = 0;
9940                 unlock_user_struct(old_act, arg3, 1);
9941             }
9942 #else
9943             struct target_old_sigaction *old_act;
9944             struct target_sigaction act, oact, *pact;
9945             if (arg2) {
9946                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9947                     return -TARGET_EFAULT;
9948                 act._sa_handler = old_act->_sa_handler;
9949                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9950                 act.sa_flags = old_act->sa_flags;
9951 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9952                 act.sa_restorer = old_act->sa_restorer;
9953 #endif
9954                 unlock_user_struct(old_act, arg2, 0);
9955                 pact = &act;
9956             } else {
9957                 pact = NULL;
9958             }
9959             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9960             if (!is_error(ret) && arg3) {
9961                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9962                     return -TARGET_EFAULT;
9963                 old_act->_sa_handler = oact._sa_handler;
9964                 old_act->sa_mask = oact.sa_mask.sig[0];
9965                 old_act->sa_flags = oact.sa_flags;
9966 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9967                 old_act->sa_restorer = oact.sa_restorer;
9968 #endif
9969                 unlock_user_struct(old_act, arg3, 1);
9970             }
9971 #endif
9972         }
9973         return ret;
9974 #endif
9975     case TARGET_NR_rt_sigaction:
9976         {
9977             /*
9978              * For Alpha and SPARC this is a 5 argument syscall, with
9979              * a 'restorer' parameter which must be copied into the
9980              * sa_restorer field of the sigaction struct.
9981              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9982              * and arg5 is the sigsetsize.
9983              */
9984 #if defined(TARGET_ALPHA)
9985             target_ulong sigsetsize = arg4;
9986             target_ulong restorer = arg5;
9987 #elif defined(TARGET_SPARC)
9988             target_ulong restorer = arg4;
9989             target_ulong sigsetsize = arg5;
9990 #else
9991             target_ulong sigsetsize = arg4;
9992             target_ulong restorer = 0;
9993 #endif
9994             struct target_sigaction *act = NULL;
9995             struct target_sigaction *oact = NULL;
9996 
9997             if (sigsetsize != sizeof(target_sigset_t)) {
9998                 return -TARGET_EINVAL;
9999             }
10000             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10001                 return -TARGET_EFAULT;
10002             }
10003             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10004                 ret = -TARGET_EFAULT;
10005             } else {
10006                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10007                 if (oact) {
10008                     unlock_user_struct(oact, arg3, 1);
10009                 }
10010             }
10011             if (act) {
10012                 unlock_user_struct(act, arg2, 0);
10013             }
10014         }
10015         return ret;
10016 #ifdef TARGET_NR_sgetmask /* not on alpha */
10017     case TARGET_NR_sgetmask:
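        /*
         * sgetmask/ssetmask operate on the old single-word signal mask,
         * hence the *_old_sigset conversion helpers.
         */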
10018         {
10019             sigset_t cur_set;
10020             abi_ulong target_set;
10021             ret = do_sigprocmask(0, NULL, &cur_set);
10022             if (!ret) {
10023                 host_to_target_old_sigset(&target_set, &cur_set);
10024                 ret = target_set;
10025             }
10026         }
10027         return ret;
10028 #endif
10029 #ifdef TARGET_NR_ssetmask /* not on alpha */
10030     case TARGET_NR_ssetmask:
10031         {
10032             sigset_t set, oset;
10033             abi_ulong target_set = arg1;
10034             target_to_host_old_sigset(&set, &target_set);
10035             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10036             if (!ret) {
10037                 host_to_target_old_sigset(&target_set, &oset);
10038                 ret = target_set;
10039             }
10040         }
10041         return ret;
10042 #endif
10043 #ifdef TARGET_NR_sigprocmask
10044     case TARGET_NR_sigprocmask:
10045         {
10046 #if defined(TARGET_ALPHA)
10047             sigset_t set, oldset;
10048             abi_ulong mask;
10049             int how;
10050 
10051             switch (arg1) {
10052             case TARGET_SIG_BLOCK:
10053                 how = SIG_BLOCK;
10054                 break;
10055             case TARGET_SIG_UNBLOCK:
10056                 how = SIG_UNBLOCK;
10057                 break;
10058             case TARGET_SIG_SETMASK:
10059                 how = SIG_SETMASK;
10060                 break;
10061             default:
10062                 return -TARGET_EINVAL;
10063             }
10064             mask = arg2;
10065             target_to_host_old_sigset(&set, &mask);
10066 
10067             ret = do_sigprocmask(how, &set, &oldset);
10068             if (!is_error(ret)) {
10069                 host_to_target_old_sigset(&mask, &oldset);
10070                 ret = mask;
10071                 cpu_env->ir[IR_V0] = 0; /* force no error */
10072             }
10073 #else
10074             sigset_t set, oldset, *set_ptr;
10075             int how;
10076 
10077             if (arg2) {
10078                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10079                 if (!p) {
10080                     return -TARGET_EFAULT;
10081                 }
10082                 target_to_host_old_sigset(&set, p);
10083                 unlock_user(p, arg2, 0);
10084                 set_ptr = &set;
10085                 switch (arg1) {
10086                 case TARGET_SIG_BLOCK:
10087                     how = SIG_BLOCK;
10088                     break;
10089                 case TARGET_SIG_UNBLOCK:
10090                     how = SIG_UNBLOCK;
10091                     break;
10092                 case TARGET_SIG_SETMASK:
10093                     how = SIG_SETMASK;
10094                     break;
10095                 default:
10096                     return -TARGET_EINVAL;
10097                 }
10098             } else {
10099                 how = 0;
10100                 set_ptr = NULL;
10101             }
10102             ret = do_sigprocmask(how, set_ptr, &oldset);
10103             if (!is_error(ret) && arg3) {
10104                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10105                     return -TARGET_EFAULT;
10106                 host_to_target_old_sigset(p, &oldset);
10107                 unlock_user(p, arg3, sizeof(target_sigset_t));
10108             }
10109 #endif
10110         }
10111         return ret;
10112 #endif
10113     case TARGET_NR_rt_sigprocmask:
10114         {
10115             int how = arg1;
10116             sigset_t set, oldset, *set_ptr;
10117 
10118             if (arg4 != sizeof(target_sigset_t)) {
10119                 return -TARGET_EINVAL;
10120             }
10121 
10122             if (arg2) {
10123                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10124                 if (!p) {
10125                     return -TARGET_EFAULT;
10126                 }
10127                 target_to_host_sigset(&set, p);
10128                 unlock_user(p, arg2, 0);
10129                 set_ptr = &set;
10130                 switch(how) {
10131                 case TARGET_SIG_BLOCK:
10132                     how = SIG_BLOCK;
10133                     break;
10134                 case TARGET_SIG_UNBLOCK:
10135                     how = SIG_UNBLOCK;
10136                     break;
10137                 case TARGET_SIG_SETMASK:
10138                     how = SIG_SETMASK;
10139                     break;
10140                 default:
10141                     return -TARGET_EINVAL;
10142                 }
10143             } else {
10144                 how = 0;
10145                 set_ptr = NULL;
10146             }
10147             ret = do_sigprocmask(how, set_ptr, &oldset);
10148             if (!is_error(ret) && arg3) {
10149                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10150                     return -TARGET_EFAULT;
10151                 host_to_target_sigset(p, &oldset);
10152                 unlock_user(p, arg3, sizeof(target_sigset_t));
10153             }
10154         }
10155         return ret;
10156 #ifdef TARGET_NR_sigpending
10157     case TARGET_NR_sigpending:
10158         {
10159             sigset_t set;
10160             ret = get_errno(sigpending(&set));
10161             if (!is_error(ret)) {
10162                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10163                     return -TARGET_EFAULT;
10164                 host_to_target_old_sigset(p, &set);
10165                 unlock_user(p, arg1, sizeof(target_sigset_t));
10166             }
10167         }
10168         return ret;
10169 #endif
10170     case TARGET_NR_rt_sigpending:
10171         {
10172             sigset_t set;
10173 
10174             /* Yes, this check is >, not != like most. We follow the kernel's
10175              * logic and it does it like this because it implements
10176              * NR_sigpending through the same code path, and in that case
10177              * the old_sigset_t is smaller in size.
10178              */
10179             if (arg2 > sizeof(target_sigset_t)) {
10180                 return -TARGET_EINVAL;
10181             }
10182 
10183             ret = get_errno(sigpending(&set));
10184             if (!is_error(ret)) {
10185                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10186                     return -TARGET_EFAULT;
10187                 host_to_target_sigset(p, &set);
10188                 unlock_user(p, arg1, sizeof(target_sigset_t));
10189             }
10190         }
10191         return ret;
10192 #ifdef TARGET_NR_sigsuspend
10193     case TARGET_NR_sigsuspend:
10194         {
10195             sigset_t *set;
10196 
10197 #if defined(TARGET_ALPHA)
10198             TaskState *ts = cpu->opaque;
10199             /* target_to_host_old_sigset will bswap back */
10200             abi_ulong mask = tswapal(arg1);
10201             set = &ts->sigsuspend_mask;
10202             target_to_host_old_sigset(set, &mask);
10203 #else
10204             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10205             if (ret != 0) {
10206                 return ret;
10207             }
10208 #endif
10209             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10210             finish_sigsuspend_mask(ret);
10211         }
10212         return ret;
10213 #endif
10214     case TARGET_NR_rt_sigsuspend:
10215         {
10216             sigset_t *set;
10217 
10218             ret = process_sigsuspend_mask(&set, arg1, arg2);
10219             if (ret != 0) {
10220                 return ret;
10221             }
10222             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10223             finish_sigsuspend_mask(ret);
10224         }
10225         return ret;
10226 #ifdef TARGET_NR_rt_sigtimedwait
10227     case TARGET_NR_rt_sigtimedwait:
10228         {
10229             sigset_t set;
10230             struct timespec uts, *puts;
10231             siginfo_t uinfo;
10232 
10233             if (arg4 != sizeof(target_sigset_t)) {
10234                 return -TARGET_EINVAL;
10235             }
10236 
10237             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10238                 return -TARGET_EFAULT;
10239             target_to_host_sigset(&set, p);
10240             unlock_user(p, arg1, 0);
10241             if (arg3) {
10242                 puts = &uts;
10243                 if (target_to_host_timespec(puts, arg3)) {
10244                     return -TARGET_EFAULT;
10245                 }
10246             } else {
10247                 puts = NULL;
10248             }
10249             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10250                                                  SIGSET_T_SIZE));
10251             if (!is_error(ret)) {
10252                 if (arg2) {
10253                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10254                                   0);
10255                     if (!p) {
10256                         return -TARGET_EFAULT;
10257                     }
10258                     host_to_target_siginfo(p, &uinfo);
10259                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10260                 }
10261                 ret = host_to_target_signal(ret);
10262             }
10263         }
10264         return ret;
10265 #endif
10266 #ifdef TARGET_NR_rt_sigtimedwait_time64
10267     case TARGET_NR_rt_sigtimedwait_time64:
10268         {
10269             sigset_t set;
10270             struct timespec uts, *puts;
10271             siginfo_t uinfo;
10272 
10273             if (arg4 != sizeof(target_sigset_t)) {
10274                 return -TARGET_EINVAL;
10275             }
10276 
10277             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10278             if (!p) {
10279                 return -TARGET_EFAULT;
10280             }
10281             target_to_host_sigset(&set, p);
10282             unlock_user(p, arg1, 0);
10283             if (arg3) {
10284                 puts = &uts;
10285                 if (target_to_host_timespec64(puts, arg3)) {
10286                     return -TARGET_EFAULT;
10287                 }
10288             } else {
10289                 puts = NULL;
10290             }
10291             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10292                                                  SIGSET_T_SIZE));
10293             if (!is_error(ret)) {
10294                 if (arg2) {
10295                     p = lock_user(VERIFY_WRITE, arg2,
10296                                   sizeof(target_siginfo_t), 0);
10297                     if (!p) {
10298                         return -TARGET_EFAULT;
10299                     }
10300                     host_to_target_siginfo(p, &uinfo);
10301                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10302                 }
10303                 ret = host_to_target_signal(ret);
10304             }
10305         }
10306         return ret;
10307 #endif
10308     case TARGET_NR_rt_sigqueueinfo:
10309         {
10310             siginfo_t uinfo;
10311 
10312             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10313             if (!p) {
10314                 return -TARGET_EFAULT;
10315             }
10316             target_to_host_siginfo(&uinfo, p);
10317             unlock_user(p, arg3, 0);
10318             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10319         }
10320         return ret;
10321     case TARGET_NR_rt_tgsigqueueinfo:
10322         {
10323             siginfo_t uinfo;
10324 
10325             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10326             if (!p) {
10327                 return -TARGET_EFAULT;
10328             }
10329             target_to_host_siginfo(&uinfo, p);
10330             unlock_user(p, arg4, 0);
10331             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10332         }
10333         return ret;
10334 #ifdef TARGET_NR_sigreturn
10335     case TARGET_NR_sigreturn:
10336         if (block_signals()) {
10337             return -QEMU_ERESTARTSYS;
10338         }
10339         return do_sigreturn(cpu_env);
10340 #endif
10341     case TARGET_NR_rt_sigreturn:
10342         if (block_signals()) {
10343             return -QEMU_ERESTARTSYS;
10344         }
10345         return do_rt_sigreturn(cpu_env);
10346     case TARGET_NR_sethostname:
10347         if (!(p = lock_user_string(arg1)))
10348             return -TARGET_EFAULT;
10349         ret = get_errno(sethostname(p, arg2));
10350         unlock_user(p, arg1, 0);
10351         return ret;
10352 #ifdef TARGET_NR_setrlimit
10353     case TARGET_NR_setrlimit:
10354         {
10355             int resource = target_to_host_resource(arg1);
10356             struct target_rlimit *target_rlim;
10357             struct rlimit rlim;
10358             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10359                 return -TARGET_EFAULT;
10360             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10361             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10362             unlock_user_struct(target_rlim, arg2, 0);
10363             /*
10364              * If we just passed through resource limit settings for memory then
10365              * they would also apply to QEMU's own allocations, and QEMU will
10366              * crash or hang or die if its allocations fail. Ideally we would
10367              * track the guest allocations in QEMU and apply the limits ourselves.
10368              * For now, just tell the guest the call succeeded but don't actually
10369              * limit anything.
10370              */
10371             if (resource != RLIMIT_AS &&
10372                 resource != RLIMIT_DATA &&
10373                 resource != RLIMIT_STACK) {
10374                 return get_errno(setrlimit(resource, &rlim));
10375             } else {
10376                 return 0;
10377             }
10378         }
10379 #endif
10380 #ifdef TARGET_NR_getrlimit
10381     case TARGET_NR_getrlimit:
10382         {
10383             int resource = target_to_host_resource(arg1);
10384             struct target_rlimit *target_rlim;
10385             struct rlimit rlim;
10386 
10387             ret = get_errno(getrlimit(resource, &rlim));
10388             if (!is_error(ret)) {
10389                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10390                     return -TARGET_EFAULT;
10391                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10392                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10393                 unlock_user_struct(target_rlim, arg2, 1);
10394             }
10395         }
10396         return ret;
10397 #endif
10398     case TARGET_NR_getrusage:
10399         {
10400             struct rusage rusage;
10401             ret = get_errno(getrusage(arg1, &rusage));
10402             if (!is_error(ret)) {
10403                 ret = host_to_target_rusage(arg2, &rusage);
10404             }
10405         }
10406         return ret;
10407 #if defined(TARGET_NR_gettimeofday)
10408     case TARGET_NR_gettimeofday:
10409         {
10410             struct timeval tv;
10411             struct timezone tz;
10412 
10413             ret = get_errno(gettimeofday(&tv, &tz));
10414             if (!is_error(ret)) {
10415                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10416                     return -TARGET_EFAULT;
10417                 }
10418                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10419                     return -TARGET_EFAULT;
10420                 }
10421             }
10422         }
10423         return ret;
10424 #endif
10425 #if defined(TARGET_NR_settimeofday)
10426     case TARGET_NR_settimeofday:
10427         {
10428             struct timeval tv, *ptv = NULL;
10429             struct timezone tz, *ptz = NULL;
10430 
10431             if (arg1) {
10432                 if (copy_from_user_timeval(&tv, arg1)) {
10433                     return -TARGET_EFAULT;
10434                 }
10435                 ptv = &tv;
10436             }
10437 
10438             if (arg2) {
10439                 if (copy_from_user_timezone(&tz, arg2)) {
10440                     return -TARGET_EFAULT;
10441                 }
10442                 ptz = &tz;
10443             }
10444 
10445             return get_errno(settimeofday(ptv, ptz));
10446         }
10447 #endif
10448 #if defined(TARGET_NR_select)
10449     case TARGET_NR_select:
10450 #if defined(TARGET_WANT_NI_OLD_SELECT)
10451         /* some architectures used to have old_select here
10452          * but now return ENOSYS for it.
10453          */
10454         ret = -TARGET_ENOSYS;
10455 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10456         ret = do_old_select(arg1);
10457 #else
10458         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10459 #endif
10460         return ret;
10461 #endif
10462 #ifdef TARGET_NR_pselect6
10463     case TARGET_NR_pselect6:
10464         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10465 #endif
10466 #ifdef TARGET_NR_pselect6_time64
10467     case TARGET_NR_pselect6_time64:
10468         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10469 #endif
10470 #ifdef TARGET_NR_symlink
10471     case TARGET_NR_symlink:
10472         {
10473             void *p2;
10474             p = lock_user_string(arg1);
10475             p2 = lock_user_string(arg2);
10476             if (!p || !p2)
10477                 ret = -TARGET_EFAULT;
10478             else
10479                 ret = get_errno(symlink(p, p2));
10480             unlock_user(p2, arg2, 0);
10481             unlock_user(p, arg1, 0);
10482         }
10483         return ret;
10484 #endif
10485 #if defined(TARGET_NR_symlinkat)
10486     case TARGET_NR_symlinkat:
10487         {
10488             void *p2;
10489             p  = lock_user_string(arg1);
10490             p2 = lock_user_string(arg3);
10491             if (!p || !p2)
10492                 ret = -TARGET_EFAULT;
10493             else
10494                 ret = get_errno(symlinkat(p, arg2, p2));
10495             unlock_user(p2, arg3, 0);
10496             unlock_user(p, arg1, 0);
10497         }
10498         return ret;
10499 #endif
10500 #ifdef TARGET_NR_readlink
10501     case TARGET_NR_readlink:
10502         {
10503             void *p2;
10504             p = lock_user_string(arg1);
10505             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10506             ret = get_errno(do_guest_readlink(p, p2, arg3));
10507             unlock_user(p2, arg2, ret);
10508             unlock_user(p, arg1, 0);
10509         }
10510         return ret;
10511 #endif
10512 #if defined(TARGET_NR_readlinkat)
10513     case TARGET_NR_readlinkat:
10514         {
10515             void *p2;
10516             p  = lock_user_string(arg2);
10517             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10518             if (!p || !p2) {
10519                 ret = -TARGET_EFAULT;
10520             } else if (!arg4) {
10521                 /* Short circuit this for the magic exe check. */
10522                 ret = -TARGET_EINVAL;
10523             } else if (is_proc_myself((const char *)p, "exe")) {
10524                 /*
10525                  * Don't worry about sign mismatch as earlier mapping
10526                  * logic would have thrown a bad address error.
10527                  */
10528                 ret = MIN(strlen(exec_path), arg4);
10529                 /* We cannot NUL terminate the string. */
10530                 memcpy(p2, exec_path, ret);
10531             } else {
10532                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10533             }
10534             unlock_user(p2, arg3, ret);
10535             unlock_user(p, arg2, 0);
10536         }
10537         return ret;
10538 #endif
10539 #ifdef TARGET_NR_swapon
10540     case TARGET_NR_swapon:
10541         if (!(p = lock_user_string(arg1)))
10542             return -TARGET_EFAULT;
10543         ret = get_errno(swapon(p, arg2));
10544         unlock_user(p, arg1, 0);
10545         return ret;
10546 #endif
10547     case TARGET_NR_reboot:
10548         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10549            /* arg4 must be ignored in all other cases */
10550            p = lock_user_string(arg4);
10551            if (!p) {
10552                return -TARGET_EFAULT;
10553            }
10554            ret = get_errno(reboot(arg1, arg2, arg3, p));
10555            unlock_user(p, arg4, 0);
10556         } else {
10557            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10558         }
10559         return ret;
10560 #ifdef TARGET_NR_mmap
10561     case TARGET_NR_mmap:
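        /*
         * Some 32-bit targets use the old mmap calling convention, where
         * arg1 points to a guest memory block holding all six arguments;
         * the other targets pass the arguments in registers as usual.
         */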
10562 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10563     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10564     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10565     || defined(TARGET_S390X)
10566         {
10567             abi_ulong *v;
10568             abi_ulong v1, v2, v3, v4, v5, v6;
10569             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10570                 return -TARGET_EFAULT;
10571             v1 = tswapal(v[0]);
10572             v2 = tswapal(v[1]);
10573             v3 = tswapal(v[2]);
10574             v4 = tswapal(v[3]);
10575             v5 = tswapal(v[4]);
10576             v6 = tswapal(v[5]);
10577             unlock_user(v, arg1, 0);
10578             ret = get_errno(target_mmap(v1, v2, v3,
10579                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10580                                         v5, v6));
10581         }
10582 #else
10583         /* mmap pointers are always untagged */
10584         ret = get_errno(target_mmap(arg1, arg2, arg3,
10585                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10586                                     arg5,
10587                                     arg6));
10588 #endif
10589         return ret;
10590 #endif
10591 #ifdef TARGET_NR_mmap2
10592     case TARGET_NR_mmap2:
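        /*
         * The mmap2 offset (arg6) is given in units of 1 << MMAP_SHIFT
         * bytes (4 KiB unless MMAP_SHIFT is already defined differently);
         * e.g. arg6 == 3 means a byte offset of 0x3000.
         */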
10593 #ifndef MMAP_SHIFT
10594 #define MMAP_SHIFT 12
10595 #endif
10596         ret = target_mmap(arg1, arg2, arg3,
10597                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10598                           arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10599         return get_errno(ret);
10600 #endif
10601     case TARGET_NR_munmap:
10602         arg1 = cpu_untagged_addr(cpu, arg1);
10603         return get_errno(target_munmap(arg1, arg2));
10604     case TARGET_NR_mprotect:
10605         arg1 = cpu_untagged_addr(cpu, arg1);
10606         {
10607             TaskState *ts = cpu->opaque;
10608             /* Special hack to detect libc making the stack executable.  */
10609             if ((arg3 & PROT_GROWSDOWN)
10610                 && arg1 >= ts->info->stack_limit
10611                 && arg1 <= ts->info->start_stack) {
10612                 arg3 &= ~PROT_GROWSDOWN;
10613                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10614                 arg1 = ts->info->stack_limit;
10615             }
10616         }
10617         return get_errno(target_mprotect(arg1, arg2, arg3));
10618 #ifdef TARGET_NR_mremap
10619     case TARGET_NR_mremap:
10620         arg1 = cpu_untagged_addr(cpu, arg1);
10621         /* mremap new_addr (arg5) is always untagged */
10622         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10623 #endif
10624         /* ??? msync/mlock/munlock are broken for softmmu.  */
10625 #ifdef TARGET_NR_msync
10626     case TARGET_NR_msync:
10627         return get_errno(msync(g2h(cpu, arg1), arg2,
10628                                target_to_host_msync_arg(arg3)));
10629 #endif
10630 #ifdef TARGET_NR_mlock
10631     case TARGET_NR_mlock:
10632         return get_errno(mlock(g2h(cpu, arg1), arg2));
10633 #endif
10634 #ifdef TARGET_NR_munlock
10635     case TARGET_NR_munlock:
10636         return get_errno(munlock(g2h(cpu, arg1), arg2));
10637 #endif
10638 #ifdef TARGET_NR_mlockall
10639     case TARGET_NR_mlockall:
10640         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10641 #endif
10642 #ifdef TARGET_NR_munlockall
10643     case TARGET_NR_munlockall:
10644         return get_errno(munlockall());
10645 #endif
10646 #ifdef TARGET_NR_truncate
10647     case TARGET_NR_truncate:
10648         if (!(p = lock_user_string(arg1)))
10649             return -TARGET_EFAULT;
10650         ret = get_errno(truncate(p, arg2));
10651         unlock_user(p, arg1, 0);
10652         return ret;
10653 #endif
10654 #ifdef TARGET_NR_ftruncate
10655     case TARGET_NR_ftruncate:
10656         return get_errno(ftruncate(arg1, arg2));
10657 #endif
10658     case TARGET_NR_fchmod:
10659         return get_errno(fchmod(arg1, arg2));
10660 #if defined(TARGET_NR_fchmodat)
10661     case TARGET_NR_fchmodat:
10662         if (!(p = lock_user_string(arg2)))
10663             return -TARGET_EFAULT;
10664         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10665         unlock_user(p, arg2, 0);
10666         return ret;
10667 #endif
10668     case TARGET_NR_getpriority:
10669         /* Note that negative values are valid for getpriority, so we must
10670            differentiate based on errno settings.  */
10671         errno = 0;
10672         ret = getpriority(arg1, arg2);
10673         if (ret == -1 && errno != 0) {
10674             return -host_to_target_errno(errno);
10675         }
10676 #ifdef TARGET_ALPHA
10677         /* Return value is the unbiased priority.  Signal no error.  */
10678         cpu_env->ir[IR_V0] = 0;
10679 #else
10680         /* Return value is a biased priority to avoid negative numbers.  */
10681         ret = 20 - ret;
10682 #endif
10683         return ret;
10684     case TARGET_NR_setpriority:
10685         return get_errno(setpriority(arg1, arg2, arg3));
10686 #ifdef TARGET_NR_statfs
10687     case TARGET_NR_statfs:
10688         if (!(p = lock_user_string(arg1))) {
10689             return -TARGET_EFAULT;
10690         }
10691         ret = get_errno(statfs(path(p), &stfs));
10692         unlock_user(p, arg1, 0);
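    /*
     * Common conversion of the host statfs result into the target
     * layout; TARGET_NR_fstatfs jumps here after calling fstatfs().
     */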
10693     convert_statfs:
10694         if (!is_error(ret)) {
10695             struct target_statfs *target_stfs;
10696 
10697             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10698                 return -TARGET_EFAULT;
10699             __put_user(stfs.f_type, &target_stfs->f_type);
10700             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10701             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10702             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10703             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10704             __put_user(stfs.f_files, &target_stfs->f_files);
10705             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10706             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10707             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10708             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10709             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10710 #ifdef _STATFS_F_FLAGS
10711             __put_user(stfs.f_flags, &target_stfs->f_flags);
10712 #else
10713             __put_user(0, &target_stfs->f_flags);
10714 #endif
10715             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10716             unlock_user_struct(target_stfs, arg2, 1);
10717         }
10718         return ret;
10719 #endif
10720 #ifdef TARGET_NR_fstatfs
10721     case TARGET_NR_fstatfs:
10722         ret = get_errno(fstatfs(arg1, &stfs));
10723         goto convert_statfs;
10724 #endif
10725 #ifdef TARGET_NR_statfs64
10726     case TARGET_NR_statfs64:
10727         if (!(p = lock_user_string(arg1))) {
10728             return -TARGET_EFAULT;
10729         }
10730         ret = get_errno(statfs(path(p), &stfs));
10731         unlock_user(p, arg1, 0);
10732     convert_statfs64:
10733         if (!is_error(ret)) {
10734             struct target_statfs64 *target_stfs;
10735 
10736             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10737                 return -TARGET_EFAULT;
10738             __put_user(stfs.f_type, &target_stfs->f_type);
10739             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10740             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10741             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10742             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10743             __put_user(stfs.f_files, &target_stfs->f_files);
10744             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10745             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10746             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10747             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10748             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10749 #ifdef _STATFS_F_FLAGS
10750             __put_user(stfs.f_flags, &target_stfs->f_flags);
10751 #else
10752             __put_user(0, &target_stfs->f_flags);
10753 #endif
10754             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10755             unlock_user_struct(target_stfs, arg3, 1);
10756         }
10757         return ret;
10758     case TARGET_NR_fstatfs64:
10759         ret = get_errno(fstatfs(arg1, &stfs));
10760         goto convert_statfs64;
10761 #endif
10762 #ifdef TARGET_NR_socketcall
10763     case TARGET_NR_socketcall:
10764         return do_socketcall(arg1, arg2);
10765 #endif
10766 #ifdef TARGET_NR_accept
10767     case TARGET_NR_accept:
10768         return do_accept4(arg1, arg2, arg3, 0);
10769 #endif
10770 #ifdef TARGET_NR_accept4
10771     case TARGET_NR_accept4:
10772         return do_accept4(arg1, arg2, arg3, arg4);
10773 #endif
10774 #ifdef TARGET_NR_bind
10775     case TARGET_NR_bind:
10776         return do_bind(arg1, arg2, arg3);
10777 #endif
10778 #ifdef TARGET_NR_connect
10779     case TARGET_NR_connect:
10780         return do_connect(arg1, arg2, arg3);
10781 #endif
10782 #ifdef TARGET_NR_getpeername
10783     case TARGET_NR_getpeername:
10784         return do_getpeername(arg1, arg2, arg3);
10785 #endif
10786 #ifdef TARGET_NR_getsockname
10787     case TARGET_NR_getsockname:
10788         return do_getsockname(arg1, arg2, arg3);
10789 #endif
10790 #ifdef TARGET_NR_getsockopt
10791     case TARGET_NR_getsockopt:
10792         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10793 #endif
10794 #ifdef TARGET_NR_listen
10795     case TARGET_NR_listen:
10796         return get_errno(listen(arg1, arg2));
10797 #endif
10798 #ifdef TARGET_NR_recv
10799     case TARGET_NR_recv:
10800         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10801 #endif
10802 #ifdef TARGET_NR_recvfrom
10803     case TARGET_NR_recvfrom:
10804         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10805 #endif
10806 #ifdef TARGET_NR_recvmsg
10807     case TARGET_NR_recvmsg:
10808         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10809 #endif
10810 #ifdef TARGET_NR_send
10811     case TARGET_NR_send:
10812         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10813 #endif
10814 #ifdef TARGET_NR_sendmsg
10815     case TARGET_NR_sendmsg:
10816         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10817 #endif
10818 #ifdef TARGET_NR_sendmmsg
10819     case TARGET_NR_sendmmsg:
10820         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10821 #endif
10822 #ifdef TARGET_NR_recvmmsg
10823     case TARGET_NR_recvmmsg:
10824         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10825 #endif
10826 #ifdef TARGET_NR_sendto
10827     case TARGET_NR_sendto:
10828         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10829 #endif
10830 #ifdef TARGET_NR_shutdown
10831     case TARGET_NR_shutdown:
10832         return get_errno(shutdown(arg1, arg2));
10833 #endif
10834 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10835     case TARGET_NR_getrandom:
10836         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10837         if (!p) {
10838             return -TARGET_EFAULT;
10839         }
10840         ret = get_errno(getrandom(p, arg2, arg3));
10841         unlock_user(p, arg1, ret);
10842         return ret;
10843 #endif
10844 #ifdef TARGET_NR_socket
10845     case TARGET_NR_socket:
10846         return do_socket(arg1, arg2, arg3);
10847 #endif
10848 #ifdef TARGET_NR_socketpair
10849     case TARGET_NR_socketpair:
10850         return do_socketpair(arg1, arg2, arg3, arg4);
10851 #endif
10852 #ifdef TARGET_NR_setsockopt
10853     case TARGET_NR_setsockopt:
10854         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10855 #endif
10856 #if defined(TARGET_NR_syslog)
10857     case TARGET_NR_syslog:
10858         {
10859             int len = arg3;
10860 
10861             switch (arg1) {
10862             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10863             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10864             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10865             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10866             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10867             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10868             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10869             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10870                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10871             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10872             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10873             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10874                 {
10875                     if (len < 0) {
10876                         return -TARGET_EINVAL;
10877                     }
10878                     if (len == 0) {
10879                         return 0;
10880                     }
10881                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10882                     if (!p) {
10883                         return -TARGET_EFAULT;
10884                     }
10885                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10886                     unlock_user(p, arg2, arg3);
10887                 }
10888                 return ret;
10889             default:
10890                 return -TARGET_EINVAL;
10891             }
10892         }
10893         break;
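        /*
         * Illustrative note: the guest ABI here mirrors the kernel's
         * do_syslog(int type, char *buf, int len), i.e. arg1 is the action,
         * arg2 the buffer and arg3 the length, so only the READ* actions
         * need guest memory locked.  A typical guest call (hypothetical
         * values) would be syslog(TARGET_SYSLOG_ACTION_READ_ALL, buf, 4096).
         */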
10894 #endif
10895     case TARGET_NR_setitimer:
10896         {
10897             struct itimerval value, ovalue, *pvalue;
10898 
10899             if (arg2) {
10900                 pvalue = &value;
10901                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10902                     || copy_from_user_timeval(&pvalue->it_value,
10903                                               arg2 + sizeof(struct target_timeval)))
10904                     return -TARGET_EFAULT;
10905             } else {
10906                 pvalue = NULL;
10907             }
10908             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10909             if (!is_error(ret) && arg3) {
10910                 if (copy_to_user_timeval(arg3,
10911                                          &ovalue.it_interval)
10912                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10913                                             &ovalue.it_value))
10914                     return -TARGET_EFAULT;
10915             }
10916         }
10917         return ret;
10918     case TARGET_NR_getitimer:
10919         {
10920             struct itimerval value;
10921 
10922             ret = get_errno(getitimer(arg1, &value));
10923             if (!is_error(ret) && arg2) {
10924                 if (copy_to_user_timeval(arg2,
10925                                          &value.it_interval)
10926                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10927                                             &value.it_value))
10928                     return -TARGET_EFAULT;
10929             }
10930         }
10931         return ret;
10932 #ifdef TARGET_NR_stat
10933     case TARGET_NR_stat:
10934         if (!(p = lock_user_string(arg1))) {
10935             return -TARGET_EFAULT;
10936         }
10937         ret = get_errno(stat(path(p), &st));
10938         unlock_user(p, arg1, 0);
10939         goto do_stat;
10940 #endif
10941 #ifdef TARGET_NR_lstat
10942     case TARGET_NR_lstat:
10943         if (!(p = lock_user_string(arg1))) {
10944             return -TARGET_EFAULT;
10945         }
10946         ret = get_errno(lstat(path(p), &st));
10947         unlock_user(p, arg1, 0);
10948         goto do_stat;
10949 #endif
10950 #ifdef TARGET_NR_fstat
10951     case TARGET_NR_fstat:
10952         {
10953             ret = get_errno(fstat(arg1, &st));
10954 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10955         do_stat:
10956 #endif
10957             if (!is_error(ret)) {
10958                 struct target_stat *target_st;
10959 
10960                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10961                     return -TARGET_EFAULT;
10962                 memset(target_st, 0, sizeof(*target_st));
10963                 __put_user(st.st_dev, &target_st->st_dev);
10964                 __put_user(st.st_ino, &target_st->st_ino);
10965                 __put_user(st.st_mode, &target_st->st_mode);
10966                 __put_user(st.st_uid, &target_st->st_uid);
10967                 __put_user(st.st_gid, &target_st->st_gid);
10968                 __put_user(st.st_nlink, &target_st->st_nlink);
10969                 __put_user(st.st_rdev, &target_st->st_rdev);
10970                 __put_user(st.st_size, &target_st->st_size);
10971                 __put_user(st.st_blksize, &target_st->st_blksize);
10972                 __put_user(st.st_blocks, &target_st->st_blocks);
10973                 __put_user(st.st_atime, &target_st->target_st_atime);
10974                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10975                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10976 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10977                 __put_user(st.st_atim.tv_nsec,
10978                            &target_st->target_st_atime_nsec);
10979                 __put_user(st.st_mtim.tv_nsec,
10980                            &target_st->target_st_mtime_nsec);
10981                 __put_user(st.st_ctim.tv_nsec,
10982                            &target_st->target_st_ctime_nsec);
10983 #endif
10984                 unlock_user_struct(target_st, arg2, 1);
10985             }
10986         }
10987         return ret;
10988 #endif
10989     case TARGET_NR_vhangup:
10990         return get_errno(vhangup());
10991 #ifdef TARGET_NR_syscall
10992     case TARGET_NR_syscall:
10993         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10994                           arg6, arg7, arg8, 0);
10995 #endif
10996 #if defined(TARGET_NR_wait4)
10997     case TARGET_NR_wait4:
10998         {
10999             int status;
11000             abi_long status_ptr = arg2;
11001             struct rusage rusage, *rusage_ptr;
11002             abi_ulong target_rusage = arg4;
11003             abi_long rusage_err;
11004             if (target_rusage)
11005                 rusage_ptr = &rusage;
11006             else
11007                 rusage_ptr = NULL;
11008             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11009             if (!is_error(ret)) {
11010                 if (status_ptr && ret) {
11011                     status = host_to_target_waitstatus(status);
11012                     if (put_user_s32(status, status_ptr))
11013                         return -TARGET_EFAULT;
11014                 }
11015                 if (target_rusage) {
11016                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11017                     if (rusage_err) {
11018                         ret = rusage_err;
11019                     }
11020                 }
11021             }
11022         }
11023         return ret;
11024 #endif
11025 #ifdef TARGET_NR_swapoff
11026     case TARGET_NR_swapoff:
11027         if (!(p = lock_user_string(arg1)))
11028             return -TARGET_EFAULT;
11029         ret = get_errno(swapoff(p));
11030         unlock_user(p, arg1, 0);
11031         return ret;
11032 #endif
11033     case TARGET_NR_sysinfo:
11034         {
11035             struct target_sysinfo *target_value;
11036             struct sysinfo value;
11037             ret = get_errno(sysinfo(&value));
11038             if (!is_error(ret) && arg1)
11039             {
11040                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11041                     return -TARGET_EFAULT;
11042                 __put_user(value.uptime, &target_value->uptime);
11043                 __put_user(value.loads[0], &target_value->loads[0]);
11044                 __put_user(value.loads[1], &target_value->loads[1]);
11045                 __put_user(value.loads[2], &target_value->loads[2]);
11046                 __put_user(value.totalram, &target_value->totalram);
11047                 __put_user(value.freeram, &target_value->freeram);
11048                 __put_user(value.sharedram, &target_value->sharedram);
11049                 __put_user(value.bufferram, &target_value->bufferram);
11050                 __put_user(value.totalswap, &target_value->totalswap);
11051                 __put_user(value.freeswap, &target_value->freeswap);
11052                 __put_user(value.procs, &target_value->procs);
11053                 __put_user(value.totalhigh, &target_value->totalhigh);
11054                 __put_user(value.freehigh, &target_value->freehigh);
11055                 __put_user(value.mem_unit, &target_value->mem_unit);
11056                 unlock_user_struct(target_value, arg1, 1);
11057             }
11058         }
11059         return ret;
11060 #ifdef TARGET_NR_ipc
11061     case TARGET_NR_ipc:
11062         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11063 #endif
11064 #ifdef TARGET_NR_semget
11065     case TARGET_NR_semget:
11066         return get_errno(semget(arg1, arg2, arg3));
11067 #endif
11068 #ifdef TARGET_NR_semop
11069     case TARGET_NR_semop:
11070         return do_semtimedop(arg1, arg2, arg3, 0, false);
11071 #endif
11072 #ifdef TARGET_NR_semtimedop
11073     case TARGET_NR_semtimedop:
11074         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11075 #endif
11076 #ifdef TARGET_NR_semtimedop_time64
11077     case TARGET_NR_semtimedop_time64:
11078         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11079 #endif
11080 #ifdef TARGET_NR_semctl
11081     case TARGET_NR_semctl:
11082         return do_semctl(arg1, arg2, arg3, arg4);
11083 #endif
11084 #ifdef TARGET_NR_msgctl
11085     case TARGET_NR_msgctl:
11086         return do_msgctl(arg1, arg2, arg3);
11087 #endif
11088 #ifdef TARGET_NR_msgget
11089     case TARGET_NR_msgget:
11090         return get_errno(msgget(arg1, arg2));
11091 #endif
11092 #ifdef TARGET_NR_msgrcv
11093     case TARGET_NR_msgrcv:
11094         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11095 #endif
11096 #ifdef TARGET_NR_msgsnd
11097     case TARGET_NR_msgsnd:
11098         return do_msgsnd(arg1, arg2, arg3, arg4);
11099 #endif
11100 #ifdef TARGET_NR_shmget
11101     case TARGET_NR_shmget:
11102         return get_errno(shmget(arg1, arg2, arg3));
11103 #endif
11104 #ifdef TARGET_NR_shmctl
11105     case TARGET_NR_shmctl:
11106         return do_shmctl(arg1, arg2, arg3);
11107 #endif
11108 #ifdef TARGET_NR_shmat
11109     case TARGET_NR_shmat:
11110         return do_shmat(cpu_env, arg1, arg2, arg3);
11111 #endif
11112 #ifdef TARGET_NR_shmdt
11113     case TARGET_NR_shmdt:
11114         return do_shmdt(arg1);
11115 #endif
11116     case TARGET_NR_fsync:
11117         return get_errno(fsync(arg1));
11118     case TARGET_NR_clone:
11119         /* Linux manages to have three different orderings for its
11120          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11121          * match the kernel's CONFIG_CLONE_* settings.
11122          * Microblaze is further special in that it uses a sixth
11123          * implicit argument to clone for the TLS pointer.
11124          */
11125 #if defined(TARGET_MICROBLAZE)
11126         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11127 #elif defined(TARGET_CLONE_BACKWARDS)
11128         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11129 #elif defined(TARGET_CLONE_BACKWARDS2)
11130         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11131 #else
11132         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11133 #endif
11134         return ret;
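        /*
         * Illustrative note: the orderings above correspond roughly to the
         * kernel's variants (argument names are the kernel's, shown only for
         * illustration):
         *
         *   default:          clone(flags, newsp, parent_tidptr, child_tidptr, tls)
         *   CLONE_BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
         *   CLONE_BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
         *   Microblaze:       as default, but with the TLS pointer taken from arg6
         */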
11135 #ifdef __NR_exit_group
11136         /* new thread calls */
11137     case TARGET_NR_exit_group:
11138         preexit_cleanup(cpu_env, arg1);
11139         return get_errno(exit_group(arg1));
11140 #endif
11141     case TARGET_NR_setdomainname:
11142         if (!(p = lock_user_string(arg1)))
11143             return -TARGET_EFAULT;
11144         ret = get_errno(setdomainname(p, arg2));
11145         unlock_user(p, arg1, 0);
11146         return ret;
11147     case TARGET_NR_uname:
11148         /* no need to transcode because we use the linux syscall */
11149         {
11150             struct new_utsname * buf;
11151 
11152             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11153                 return -TARGET_EFAULT;
11154             ret = get_errno(sys_uname(buf));
11155             if (!is_error(ret)) {
11156                 /* Overwrite the native machine name with whatever is being
11157                    emulated. */
11158                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11159                           sizeof(buf->machine));
11160                 /* Allow the user to override the reported release.  */
11161                 if (qemu_uname_release && *qemu_uname_release) {
11162                     g_strlcpy(buf->release, qemu_uname_release,
11163                               sizeof(buf->release));
11164                 }
11165             }
11166             unlock_user_struct(buf, arg1, 1);
11167         }
11168         return ret;
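        /*
         * Illustrative note: this is why "uname -m" run under qemu-user
         * reports the emulated architecture rather than the host one, and
         * why the release string seen by guests can be pinned through the
         * emulator's uname-release override (-r / QEMU_UNAME).
         */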
11169 #ifdef TARGET_I386
11170     case TARGET_NR_modify_ldt:
11171         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11172 #if !defined(TARGET_X86_64)
11173     case TARGET_NR_vm86:
11174         return do_vm86(cpu_env, arg1, arg2);
11175 #endif
11176 #endif
11177 #if defined(TARGET_NR_adjtimex)
11178     case TARGET_NR_adjtimex:
11179         {
11180             struct timex host_buf;
11181 
11182             if (target_to_host_timex(&host_buf, arg1) != 0) {
11183                 return -TARGET_EFAULT;
11184             }
11185             ret = get_errno(adjtimex(&host_buf));
11186             if (!is_error(ret)) {
11187                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11188                     return -TARGET_EFAULT;
11189                 }
11190             }
11191         }
11192         return ret;
11193 #endif
11194 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11195     case TARGET_NR_clock_adjtime:
11196         {
11197             struct timex htx;
11198 
11199             if (target_to_host_timex(&htx, arg2) != 0) {
11200                 return -TARGET_EFAULT;
11201             }
11202             ret = get_errno(clock_adjtime(arg1, &htx));
11203             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11204                 return -TARGET_EFAULT;
11205             }
11206         }
11207         return ret;
11208 #endif
11209 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11210     case TARGET_NR_clock_adjtime64:
11211         {
11212             struct timex htx;
11213 
11214             if (target_to_host_timex64(&htx, arg2) != 0) {
11215                 return -TARGET_EFAULT;
11216             }
11217             ret = get_errno(clock_adjtime(arg1, &htx));
11218             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11219                 return -TARGET_EFAULT;
11220             }
11221         }
11222         return ret;
11223 #endif
11224     case TARGET_NR_getpgid:
11225         return get_errno(getpgid(arg1));
11226     case TARGET_NR_fchdir:
11227         return get_errno(fchdir(arg1));
11228     case TARGET_NR_personality:
11229         return get_errno(personality(arg1));
11230 #ifdef TARGET_NR__llseek /* Not on alpha */
11231     case TARGET_NR__llseek:
11232         {
11233             int64_t res;
11234 #if !defined(__NR_llseek)
11235             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11236             if (res == -1) {
11237                 ret = get_errno(res);
11238             } else {
11239                 ret = 0;
11240             }
11241 #else
11242             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11243 #endif
11244             if ((ret == 0) && put_user_s64(res, arg4)) {
11245                 return -TARGET_EFAULT;
11246             }
11247         }
11248         return ret;
11249 #endif
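/*
 * Worked example (illustrative values) for the fallback above: a guest
 * _llseek(fd, 0x1, 0x80000000, &result, SEEK_SET) becomes
 * lseek(fd, ((uint64_t)0x1 << 32) | 0x80000000, SEEK_SET), i.e. a seek to
 * offset 0x180000000, with the 64-bit result written back via put_user_s64.
 */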
11250 #ifdef TARGET_NR_getdents
11251     case TARGET_NR_getdents:
11252         return do_getdents(arg1, arg2, arg3);
11253 #endif /* TARGET_NR_getdents */
11254 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11255     case TARGET_NR_getdents64:
11256         return do_getdents64(arg1, arg2, arg3);
11257 #endif /* TARGET_NR_getdents64 */
11258 #if defined(TARGET_NR__newselect)
11259     case TARGET_NR__newselect:
11260         return do_select(arg1, arg2, arg3, arg4, arg5);
11261 #endif
11262 #ifdef TARGET_NR_poll
11263     case TARGET_NR_poll:
11264         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11265 #endif
11266 #ifdef TARGET_NR_ppoll
11267     case TARGET_NR_ppoll:
11268         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11269 #endif
11270 #ifdef TARGET_NR_ppoll_time64
11271     case TARGET_NR_ppoll_time64:
11272         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11273 #endif
11274     case TARGET_NR_flock:
11275         /* NOTE: the flock constant seems to be the same for every
11276            Linux platform */
11277         return get_errno(safe_flock(arg1, arg2));
11278     case TARGET_NR_readv:
11279         {
11280             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11281             if (vec != NULL) {
11282                 ret = get_errno(safe_readv(arg1, vec, arg3));
11283                 unlock_iovec(vec, arg2, arg3, 1);
11284             } else {
11285                 ret = -host_to_target_errno(errno);
11286             }
11287         }
11288         return ret;
11289     case TARGET_NR_writev:
11290         {
11291             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11292             if (vec != NULL) {
11293                 ret = get_errno(safe_writev(arg1, vec, arg3));
11294                 unlock_iovec(vec, arg2, arg3, 0);
11295             } else {
11296                 ret = -host_to_target_errno(errno);
11297             }
11298         }
11299         return ret;
11300 #if defined(TARGET_NR_preadv)
11301     case TARGET_NR_preadv:
11302         {
11303             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11304             if (vec != NULL) {
11305                 unsigned long low, high;
11306 
11307                 target_to_host_low_high(arg4, arg5, &low, &high);
11308                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11309                 unlock_iovec(vec, arg2, arg3, 1);
11310             } else {
11311                 ret = -host_to_target_errno(errno);
11312             }
11313         }
11314         return ret;
11315 #endif
11316 #if defined(TARGET_NR_pwritev)
11317     case TARGET_NR_pwritev:
11318         {
11319             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11320             if (vec != NULL) {
11321                 unsigned long low, high;
11322 
11323                 target_to_host_low_high(arg4, arg5, &low, &high);
11324                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11325                 unlock_iovec(vec, arg2, arg3, 0);
11326             } else {
11327                 ret = -host_to_target_errno(errno);
11328             }
11329         }
11330         return ret;
11331 #endif
11332     case TARGET_NR_getsid:
11333         return get_errno(getsid(arg1));
11334 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11335     case TARGET_NR_fdatasync:
11336         return get_errno(fdatasync(arg1));
11337 #endif
11338     case TARGET_NR_sched_getaffinity:
11339         {
11340             unsigned int mask_size;
11341             unsigned long *mask;
11342 
11343             /*
11344              * sched_getaffinity needs multiples of ulong, so need to take
11345              * care of mismatches between target ulong and host ulong sizes.
11346              */
11347             if (arg2 & (sizeof(abi_ulong) - 1)) {
11348                 return -TARGET_EINVAL;
11349             }
11350             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11351 
11352             mask = alloca(mask_size);
11353             memset(mask, 0, mask_size);
11354             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11355 
11356             if (!is_error(ret)) {
11357                 if (ret > arg2) {
11358                     /* More data returned than the caller's buffer will fit.
11359                      * This only happens if sizeof(abi_long) < sizeof(long)
11360                      * and the caller passed us a buffer holding an odd number
11361                      * of abi_longs. If the host kernel is actually using the
11362                      * extra 4 bytes then fail EINVAL; otherwise we can just
11363                      * ignore them and only copy the interesting part.
11364                      */
11365                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11366                     if (numcpus > arg2 * 8) {
11367                         return -TARGET_EINVAL;
11368                     }
11369                     ret = arg2;
11370                 }
11371 
11372                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11373                     return -TARGET_EFAULT;
11374                 }
11375             }
11376         }
11377         return ret;
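        /*
         * Worked example (illustrative values): a 32-bit guest on a 64-bit
         * host passing cpusetsize == 12 (three abi_ulongs) passes the
         * alignment check, and mask_size is rounded up so the host kernel
         * always sees whole host longs ((12 + 7) & ~7 == 16).  If the kernel
         * then writes 16 bytes, ret > arg2 and only the first 12 bytes are
         * copied back, as the comment above describes.
         */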
11378     case TARGET_NR_sched_setaffinity:
11379         {
11380             unsigned int mask_size;
11381             unsigned long *mask;
11382 
11383             /*
11384              * sched_setaffinity needs multiples of ulong, so need to take
11385              * care of mismatches between target ulong and host ulong sizes.
11386              */
11387             if (arg2 & (sizeof(abi_ulong) - 1)) {
11388                 return -TARGET_EINVAL;
11389             }
11390             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11391             mask = alloca(mask_size);
11392 
11393             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11394             if (ret) {
11395                 return ret;
11396             }
11397 
11398             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11399         }
11400     case TARGET_NR_getcpu:
11401         {
11402             unsigned cpu, node;
11403             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
11404                                        arg2 ? &node : NULL,
11405                                        NULL));
11406             if (is_error(ret)) {
11407                 return ret;
11408             }
11409             if (arg1 && put_user_u32(cpu, arg1)) {
11410                 return -TARGET_EFAULT;
11411             }
11412             if (arg2 && put_user_u32(node, arg2)) {
11413                 return -TARGET_EFAULT;
11414             }
11415         }
11416         return ret;
11417     case TARGET_NR_sched_setparam:
11418         {
11419             struct target_sched_param *target_schp;
11420             struct sched_param schp;
11421 
11422             if (arg2 == 0) {
11423                 return -TARGET_EINVAL;
11424             }
11425             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11426                 return -TARGET_EFAULT;
11427             }
11428             schp.sched_priority = tswap32(target_schp->sched_priority);
11429             unlock_user_struct(target_schp, arg2, 0);
11430             return get_errno(sys_sched_setparam(arg1, &schp));
11431         }
11432     case TARGET_NR_sched_getparam:
11433         {
11434             struct target_sched_param *target_schp;
11435             struct sched_param schp;
11436 
11437             if (arg2 == 0) {
11438                 return -TARGET_EINVAL;
11439             }
11440             ret = get_errno(sys_sched_getparam(arg1, &schp));
11441             if (!is_error(ret)) {
11442                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11443                     return -TARGET_EFAULT;
11444                 }
11445                 target_schp->sched_priority = tswap32(schp.sched_priority);
11446                 unlock_user_struct(target_schp, arg2, 1);
11447             }
11448         }
11449         return ret;
11450     case TARGET_NR_sched_setscheduler:
11451         {
11452             struct target_sched_param *target_schp;
11453             struct sched_param schp;
11454             if (arg3 == 0) {
11455                 return -TARGET_EINVAL;
11456             }
11457             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11458                 return -TARGET_EFAULT;
11459             }
11460             schp.sched_priority = tswap32(target_schp->sched_priority);
11461             unlock_user_struct(target_schp, arg3, 0);
11462             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11463         }
11464     case TARGET_NR_sched_getscheduler:
11465         return get_errno(sys_sched_getscheduler(arg1));
11466     case TARGET_NR_sched_getattr:
11467         {
11468             struct target_sched_attr *target_scha;
11469             struct sched_attr scha;
11470             if (arg2 == 0) {
11471                 return -TARGET_EINVAL;
11472             }
11473             if (arg3 > sizeof(scha)) {
11474                 arg3 = sizeof(scha);
11475             }
11476             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11477             if (!is_error(ret)) {
11478                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11479                 if (!target_scha) {
11480                     return -TARGET_EFAULT;
11481                 }
11482                 target_scha->size = tswap32(scha.size);
11483                 target_scha->sched_policy = tswap32(scha.sched_policy);
11484                 target_scha->sched_flags = tswap64(scha.sched_flags);
11485                 target_scha->sched_nice = tswap32(scha.sched_nice);
11486                 target_scha->sched_priority = tswap32(scha.sched_priority);
11487                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11488                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11489                 target_scha->sched_period = tswap64(scha.sched_period);
11490                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11491                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11492                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11493                 }
11494                 unlock_user(target_scha, arg2, arg3);
11495             }
11496             return ret;
11497         }
11498     case TARGET_NR_sched_setattr:
11499         {
11500             struct target_sched_attr *target_scha;
11501             struct sched_attr scha;
11502             uint32_t size;
11503             int zeroed;
11504             if (arg2 == 0) {
11505                 return -TARGET_EINVAL;
11506             }
11507             if (get_user_u32(size, arg2)) {
11508                 return -TARGET_EFAULT;
11509             }
11510             if (!size) {
11511                 size = offsetof(struct target_sched_attr, sched_util_min);
11512             }
11513             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11514                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11515                     return -TARGET_EFAULT;
11516                 }
11517                 return -TARGET_E2BIG;
11518             }
11519 
11520             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11521             if (zeroed < 0) {
11522                 return zeroed;
11523             } else if (zeroed == 0) {
11524                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11525                     return -TARGET_EFAULT;
11526                 }
11527                 return -TARGET_E2BIG;
11528             }
11529             if (size > sizeof(struct target_sched_attr)) {
11530                 size = sizeof(struct target_sched_attr);
11531             }
11532 
11533             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11534             if (!target_scha) {
11535                 return -TARGET_EFAULT;
11536             }
11537             scha.size = size;
11538             scha.sched_policy = tswap32(target_scha->sched_policy);
11539             scha.sched_flags = tswap64(target_scha->sched_flags);
11540             scha.sched_nice = tswap32(target_scha->sched_nice);
11541             scha.sched_priority = tswap32(target_scha->sched_priority);
11542             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11543             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11544             scha.sched_period = tswap64(target_scha->sched_period);
11545             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11546                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11547                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11548             }
11549             unlock_user(target_scha, arg2, 0);
11550             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11551         }
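    /*
     * Illustrative note: sched_attr is an extensible struct, and the size
     * handshake above follows the kernel convention: a size of 0 means the
     * base layout, anything smaller than the base layout is refused, and a
     * guest struct larger than the one QEMU knows about is accepted only if
     * the extra bytes are zero (check_zeroed_user); otherwise the supported
     * size is written back and -TARGET_E2BIG returned.
     */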
11552     case TARGET_NR_sched_yield:
11553         return get_errno(sched_yield());
11554     case TARGET_NR_sched_get_priority_max:
11555         return get_errno(sched_get_priority_max(arg1));
11556     case TARGET_NR_sched_get_priority_min:
11557         return get_errno(sched_get_priority_min(arg1));
11558 #ifdef TARGET_NR_sched_rr_get_interval
11559     case TARGET_NR_sched_rr_get_interval:
11560         {
11561             struct timespec ts;
11562             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11563             if (!is_error(ret)) {
11564                 ret = host_to_target_timespec(arg2, &ts);
11565             }
11566         }
11567         return ret;
11568 #endif
11569 #ifdef TARGET_NR_sched_rr_get_interval_time64
11570     case TARGET_NR_sched_rr_get_interval_time64:
11571         {
11572             struct timespec ts;
11573             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11574             if (!is_error(ret)) {
11575                 ret = host_to_target_timespec64(arg2, &ts);
11576             }
11577         }
11578         return ret;
11579 #endif
11580 #if defined(TARGET_NR_nanosleep)
11581     case TARGET_NR_nanosleep:
11582         {
11583             struct timespec req, rem;
11584             target_to_host_timespec(&req, arg1);
11585             ret = get_errno(safe_nanosleep(&req, &rem));
11586             if (is_error(ret) && arg2) {
11587                 host_to_target_timespec(arg2, &rem);
11588             }
11589         }
11590         return ret;
11591 #endif
11592     case TARGET_NR_prctl:
11593         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11595 #ifdef TARGET_NR_arch_prctl
11596     case TARGET_NR_arch_prctl:
11597         return do_arch_prctl(cpu_env, arg1, arg2);
11598 #endif
11599 #ifdef TARGET_NR_pread64
11600     case TARGET_NR_pread64:
11601         if (regpairs_aligned(cpu_env, num)) {
11602             arg4 = arg5;
11603             arg5 = arg6;
11604         }
11605         if (arg2 == 0 && arg3 == 0) {
11606             /* Special-case NULL buffer and zero length, which should succeed */
11607             p = 0;
11608         } else {
11609             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11610             if (!p) {
11611                 return -TARGET_EFAULT;
11612             }
11613         }
11614         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11615         unlock_user(p, arg2, ret);
11616         return ret;
11617     case TARGET_NR_pwrite64:
11618         if (regpairs_aligned(cpu_env, num)) {
11619             arg4 = arg5;
11620             arg5 = arg6;
11621         }
11622         if (arg2 == 0 && arg3 == 0) {
11623             /* Special-case NULL buffer and zero length, which should succeed */
11624             p = 0;
11625         } else {
11626             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11627             if (!p) {
11628                 return -TARGET_EFAULT;
11629             }
11630         }
11631         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11632         unlock_user(p, arg2, 0);
11633         return ret;
11634 #endif
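/*
 * Illustrative note: for 32-bit guests the 64-bit file offset arrives as two
 * register-sized halves and is reassembled by target_offset64(arg4, arg5);
 * regpairs_aligned() covers ABIs that require such pairs to start in an
 * even-numbered register, which is why arg4/arg5 may first be shifted down
 * from arg5/arg6.  Which half is high and which is low is target-dependent.
 */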
11635     case TARGET_NR_getcwd:
11636         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11637             return -TARGET_EFAULT;
11638         ret = get_errno(sys_getcwd1(p, arg2));
11639         unlock_user(p, arg1, ret);
11640         return ret;
11641     case TARGET_NR_capget:
11642     case TARGET_NR_capset:
11643     {
11644         struct target_user_cap_header *target_header;
11645         struct target_user_cap_data *target_data = NULL;
11646         struct __user_cap_header_struct header;
11647         struct __user_cap_data_struct data[2];
11648         struct __user_cap_data_struct *dataptr = NULL;
11649         int i, target_datalen;
11650         int data_items = 1;
11651 
11652         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11653             return -TARGET_EFAULT;
11654         }
11655         header.version = tswap32(target_header->version);
11656         header.pid = tswap32(target_header->pid);
11657 
11658         if (header.version != _LINUX_CAPABILITY_VERSION) {
11659             /* Version 2 and up takes pointer to two user_data structs */
11660             data_items = 2;
11661         }
11662 
11663         target_datalen = sizeof(*target_data) * data_items;
11664 
11665         if (arg2) {
11666             if (num == TARGET_NR_capget) {
11667                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11668             } else {
11669                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11670             }
11671             if (!target_data) {
11672                 unlock_user_struct(target_header, arg1, 0);
11673                 return -TARGET_EFAULT;
11674             }
11675 
11676             if (num == TARGET_NR_capset) {
11677                 for (i = 0; i < data_items; i++) {
11678                     data[i].effective = tswap32(target_data[i].effective);
11679                     data[i].permitted = tswap32(target_data[i].permitted);
11680                     data[i].inheritable = tswap32(target_data[i].inheritable);
11681                 }
11682             }
11683 
11684             dataptr = data;
11685         }
11686 
11687         if (num == TARGET_NR_capget) {
11688             ret = get_errno(capget(&header, dataptr));
11689         } else {
11690             ret = get_errno(capset(&header, dataptr));
11691         }
11692 
11693         /* The kernel always updates version for both capget and capset */
11694         target_header->version = tswap32(header.version);
11695         unlock_user_struct(target_header, arg1, 1);
11696 
11697         if (arg2) {
11698             if (num == TARGET_NR_capget) {
11699                 for (i = 0; i < data_items; i++) {
11700                     target_data[i].effective = tswap32(data[i].effective);
11701                     target_data[i].permitted = tswap32(data[i].permitted);
11702                     target_data[i].inheritable = tswap32(data[i].inheritable);
11703                 }
11704                 unlock_user(target_data, arg2, target_datalen);
11705             } else {
11706                 unlock_user(target_data, arg2, 0);
11707             }
11708         }
11709         return ret;
11710     }
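    /*
     * Illustrative note: _LINUX_CAPABILITY_VERSION here is the v1 constant,
     * so a v1 header implies a single __user_cap_data_struct (32 capability
     * bits), while v2/v3 headers carry two of them to cover 64 bits, which
     * is exactly what the data_items selection above encodes.
     */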
11711     case TARGET_NR_sigaltstack:
11712         return do_sigaltstack(arg1, arg2, cpu_env);
11713 
11714 #ifdef CONFIG_SENDFILE
11715 #ifdef TARGET_NR_sendfile
11716     case TARGET_NR_sendfile:
11717     {
11718         off_t *offp = NULL;
11719         off_t off;
11720         if (arg3) {
11721             ret = get_user_sal(off, arg3);
11722             if (is_error(ret)) {
11723                 return ret;
11724             }
11725             offp = &off;
11726         }
11727         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11728         if (!is_error(ret) && arg3) {
11729             abi_long ret2 = put_user_sal(off, arg3);
11730             if (is_error(ret2)) {
11731                 ret = ret2;
11732             }
11733         }
11734         return ret;
11735     }
11736 #endif
11737 #ifdef TARGET_NR_sendfile64
11738     case TARGET_NR_sendfile64:
11739     {
11740         off_t *offp = NULL;
11741         off_t off;
11742         if (arg3) {
11743             ret = get_user_s64(off, arg3);
11744             if (is_error(ret)) {
11745                 return ret;
11746             }
11747             offp = &off;
11748         }
11749         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11750         if (!is_error(ret) && arg3) {
11751             abi_long ret2 = put_user_s64(off, arg3);
11752             if (is_error(ret2)) {
11753                 ret = ret2;
11754             }
11755         }
11756         return ret;
11757     }
11758 #endif
11759 #endif
11760 #ifdef TARGET_NR_vfork
11761     case TARGET_NR_vfork:
11762         return get_errno(do_fork(cpu_env,
11763                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11764                          0, 0, 0, 0));
11765 #endif
11766 #ifdef TARGET_NR_ugetrlimit
11767     case TARGET_NR_ugetrlimit:
11768     {
11769 	struct rlimit rlim;
11770 	int resource = target_to_host_resource(arg1);
11771 	ret = get_errno(getrlimit(resource, &rlim));
11772 	if (!is_error(ret)) {
11773 	    struct target_rlimit *target_rlim;
11774             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11775                 return -TARGET_EFAULT;
11776 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11777 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11778             unlock_user_struct(target_rlim, arg2, 1);
11779 	}
11780         return ret;
11781     }
11782 #endif
11783 #ifdef TARGET_NR_truncate64
11784     case TARGET_NR_truncate64:
11785         if (!(p = lock_user_string(arg1)))
11786             return -TARGET_EFAULT;
11787 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11788         unlock_user(p, arg1, 0);
11789         return ret;
11790 #endif
11791 #ifdef TARGET_NR_ftruncate64
11792     case TARGET_NR_ftruncate64:
11793         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11794 #endif
11795 #ifdef TARGET_NR_stat64
11796     case TARGET_NR_stat64:
11797         if (!(p = lock_user_string(arg1))) {
11798             return -TARGET_EFAULT;
11799         }
11800         ret = get_errno(stat(path(p), &st));
11801         unlock_user(p, arg1, 0);
11802         if (!is_error(ret))
11803             ret = host_to_target_stat64(cpu_env, arg2, &st);
11804         return ret;
11805 #endif
11806 #ifdef TARGET_NR_lstat64
11807     case TARGET_NR_lstat64:
11808         if (!(p = lock_user_string(arg1))) {
11809             return -TARGET_EFAULT;
11810         }
11811         ret = get_errno(lstat(path(p), &st));
11812         unlock_user(p, arg1, 0);
11813         if (!is_error(ret))
11814             ret = host_to_target_stat64(cpu_env, arg2, &st);
11815         return ret;
11816 #endif
11817 #ifdef TARGET_NR_fstat64
11818     case TARGET_NR_fstat64:
11819         ret = get_errno(fstat(arg1, &st));
11820         if (!is_error(ret))
11821             ret = host_to_target_stat64(cpu_env, arg2, &st);
11822         return ret;
11823 #endif
11824 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11825 #ifdef TARGET_NR_fstatat64
11826     case TARGET_NR_fstatat64:
11827 #endif
11828 #ifdef TARGET_NR_newfstatat
11829     case TARGET_NR_newfstatat:
11830 #endif
11831         if (!(p = lock_user_string(arg2))) {
11832             return -TARGET_EFAULT;
11833         }
11834         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11835         unlock_user(p, arg2, 0);
11836         if (!is_error(ret))
11837             ret = host_to_target_stat64(cpu_env, arg3, &st);
11838         return ret;
11839 #endif
11840 #if defined(TARGET_NR_statx)
11841     case TARGET_NR_statx:
11842         {
11843             struct target_statx *target_stx;
11844             int dirfd = arg1;
11845             int flags = arg3;
11846 
11847             p = lock_user_string(arg2);
11848             if (p == NULL) {
11849                 return -TARGET_EFAULT;
11850             }
11851 #if defined(__NR_statx)
11852             {
11853                 /*
11854                  * It is assumed that struct statx is architecture independent.
11855                  */
11856                 struct target_statx host_stx;
11857                 int mask = arg4;
11858 
11859                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11860                 if (!is_error(ret)) {
11861                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11862                         unlock_user(p, arg2, 0);
11863                         return -TARGET_EFAULT;
11864                     }
11865                 }
11866 
11867                 if (ret != -TARGET_ENOSYS) {
11868                     unlock_user(p, arg2, 0);
11869                     return ret;
11870                 }
11871             }
11872 #endif
11873             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11874             unlock_user(p, arg2, 0);
11875 
11876             if (!is_error(ret)) {
11877                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11878                     return -TARGET_EFAULT;
11879                 }
11880                 memset(target_stx, 0, sizeof(*target_stx));
11881                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11882                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11883                 __put_user(st.st_ino, &target_stx->stx_ino);
11884                 __put_user(st.st_mode, &target_stx->stx_mode);
11885                 __put_user(st.st_uid, &target_stx->stx_uid);
11886                 __put_user(st.st_gid, &target_stx->stx_gid);
11887                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11888                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11889                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11890                 __put_user(st.st_size, &target_stx->stx_size);
11891                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11892                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11893                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11894                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11895                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11896                 unlock_user_struct(target_stx, arg5, 1);
11897             }
11898         }
11899         return ret;
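        /*
         * Illustrative note: when the host lacks statx(2) or sys_statx()
         * fails with ENOSYS, the fstatat() fallback above can only populate
         * the stx_* fields that an ordinary struct stat provides; statx-only
         * extras such as the birth time stay zero from the memset().
         */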
11900 #endif
11901 #ifdef TARGET_NR_lchown
11902     case TARGET_NR_lchown:
11903         if (!(p = lock_user_string(arg1)))
11904             return -TARGET_EFAULT;
11905         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11906         unlock_user(p, arg1, 0);
11907         return ret;
11908 #endif
11909 #ifdef TARGET_NR_getuid
11910     case TARGET_NR_getuid:
11911         return get_errno(high2lowuid(getuid()));
11912 #endif
11913 #ifdef TARGET_NR_getgid
11914     case TARGET_NR_getgid:
11915         return get_errno(high2lowgid(getgid()));
11916 #endif
11917 #ifdef TARGET_NR_geteuid
11918     case TARGET_NR_geteuid:
11919         return get_errno(high2lowuid(geteuid()));
11920 #endif
11921 #ifdef TARGET_NR_getegid
11922     case TARGET_NR_getegid:
11923         return get_errno(high2lowgid(getegid()));
11924 #endif
11925     case TARGET_NR_setreuid:
11926         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11927     case TARGET_NR_setregid:
11928         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11929     case TARGET_NR_getgroups:
11930         { /* the same code as for TARGET_NR_getgroups32 */
11931             int gidsetsize = arg1;
11932             target_id *target_grouplist;
11933             g_autofree gid_t *grouplist = NULL;
11934             int i;
11935 
11936             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11937                 return -TARGET_EINVAL;
11938             }
11939             if (gidsetsize > 0) {
11940                 grouplist = g_try_new(gid_t, gidsetsize);
11941                 if (!grouplist) {
11942                     return -TARGET_ENOMEM;
11943                 }
11944             }
11945             ret = get_errno(getgroups(gidsetsize, grouplist));
11946             if (!is_error(ret) && gidsetsize > 0) {
11947                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11948                                              gidsetsize * sizeof(target_id), 0);
11949                 if (!target_grouplist) {
11950                     return -TARGET_EFAULT;
11951                 }
11952                 for (i = 0; i < ret; i++) {
11953                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11954                 }
11955                 unlock_user(target_grouplist, arg2,
11956                             gidsetsize * sizeof(target_id));
11957             }
11958             return ret;
11959         }
11960     case TARGET_NR_setgroups:
11961         { /* the same code as for TARGET_NR_setgroups32 */
11962             int gidsetsize = arg1;
11963             target_id *target_grouplist;
11964             g_autofree gid_t *grouplist = NULL;
11965             int i;
11966 
11967             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11968                 return -TARGET_EINVAL;
11969             }
11970             if (gidsetsize > 0) {
11971                 grouplist = g_try_new(gid_t, gidsetsize);
11972                 if (!grouplist) {
11973                     return -TARGET_ENOMEM;
11974                 }
11975                 target_grouplist = lock_user(VERIFY_READ, arg2,
11976                                              gidsetsize * sizeof(target_id), 1);
11977                 if (!target_grouplist) {
11978                     return -TARGET_EFAULT;
11979                 }
11980                 for (i = 0; i < gidsetsize; i++) {
11981                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11982                 }
11983                 unlock_user(target_grouplist, arg2,
11984                             gidsetsize * sizeof(target_id));
11985             }
11986             return get_errno(setgroups(gidsetsize, grouplist));
11987         }
11988     case TARGET_NR_fchown:
11989         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11990 #if defined(TARGET_NR_fchownat)
11991     case TARGET_NR_fchownat:
11992         if (!(p = lock_user_string(arg2)))
11993             return -TARGET_EFAULT;
11994         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11995                                  low2highgid(arg4), arg5));
11996         unlock_user(p, arg2, 0);
11997         return ret;
11998 #endif
11999 #ifdef TARGET_NR_setresuid
12000     case TARGET_NR_setresuid:
12001         return get_errno(sys_setresuid(low2highuid(arg1),
12002                                        low2highuid(arg2),
12003                                        low2highuid(arg3)));
12004 #endif
12005 #ifdef TARGET_NR_getresuid
12006     case TARGET_NR_getresuid:
12007         {
12008             uid_t ruid, euid, suid;
12009             ret = get_errno(getresuid(&ruid, &euid, &suid));
12010             if (!is_error(ret)) {
12011                 if (put_user_id(high2lowuid(ruid), arg1)
12012                     || put_user_id(high2lowuid(euid), arg2)
12013                     || put_user_id(high2lowuid(suid), arg3))
12014                     return -TARGET_EFAULT;
12015             }
12016         }
12017         return ret;
12018 #endif
12019 #ifdef TARGET_NR_setresgid
12020     case TARGET_NR_setresgid:
12021         return get_errno(sys_setresgid(low2highgid(arg1),
12022                                        low2highgid(arg2),
12023                                        low2highgid(arg3)));
12024 #endif
12025 #ifdef TARGET_NR_getresgid
12026     case TARGET_NR_getresgid:
12027         {
12028             gid_t rgid, egid, sgid;
12029             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12030             if (!is_error(ret)) {
12031                 if (put_user_id(high2lowgid(rgid), arg1)
12032                     || put_user_id(high2lowgid(egid), arg2)
12033                     || put_user_id(high2lowgid(sgid), arg3))
12034                     return -TARGET_EFAULT;
12035             }
12036         }
12037         return ret;
12038 #endif
12039 #ifdef TARGET_NR_chown
12040     case TARGET_NR_chown:
12041         if (!(p = lock_user_string(arg1)))
12042             return -TARGET_EFAULT;
12043         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12044         unlock_user(p, arg1, 0);
12045         return ret;
12046 #endif
12047     case TARGET_NR_setuid:
12048         return get_errno(sys_setuid(low2highuid(arg1)));
12049     case TARGET_NR_setgid:
12050         return get_errno(sys_setgid(low2highgid(arg1)));
12051     case TARGET_NR_setfsuid:
12052         return get_errno(setfsuid(arg1));
12053     case TARGET_NR_setfsgid:
12054         return get_errno(setfsgid(arg1));
12055 
12056 #ifdef TARGET_NR_lchown32
12057     case TARGET_NR_lchown32:
12058         if (!(p = lock_user_string(arg1)))
12059             return -TARGET_EFAULT;
12060         ret = get_errno(lchown(p, arg2, arg3));
12061         unlock_user(p, arg1, 0);
12062         return ret;
12063 #endif
12064 #ifdef TARGET_NR_getuid32
12065     case TARGET_NR_getuid32:
12066         return get_errno(getuid());
12067 #endif
12068 
12069 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12070    /* Alpha specific */
12071     case TARGET_NR_getxuid:
12072          {
12073             uid_t euid;
12074             euid=geteuid();
12075             cpu_env->ir[IR_A4]=euid;
12076          }
12077         return get_errno(getuid());
12078 #endif
12079 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12080    /* Alpha specific */
12081     case TARGET_NR_getxgid:
12082          {
12083             gid_t egid;
12084             egid=getegid();
12085             cpu_env->ir[IR_A4]=egid;
12086          }
12087         return get_errno(getgid());
12088 #endif
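/*
 * Illustrative note: the OSF/Alpha getxuid/getxgid calls return two values
 * at once; the real ID is the normal syscall result while the effective ID
 * is handed back in the a4 register (cpu_env->ir[IR_A4]), mirroring the
 * Alpha kernel's second-return-value convention.
 */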
12089 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12090     /* Alpha specific */
12091     case TARGET_NR_osf_getsysinfo:
12092         ret = -TARGET_EOPNOTSUPP;
12093         switch (arg1) {
12094           case TARGET_GSI_IEEE_FP_CONTROL:
12095             {
12096                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12097                 uint64_t swcr = cpu_env->swcr;
12098 
12099                 swcr &= ~SWCR_STATUS_MASK;
12100                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12101 
12102                 if (put_user_u64 (swcr, arg2))
12103                         return -TARGET_EFAULT;
12104                 ret = 0;
12105             }
12106             break;
12107 
12108           /* case GSI_IEEE_STATE_AT_SIGNAL:
12109              -- Not implemented in linux kernel.
12110              case GSI_UACPROC:
12111              -- Retrieves current unaligned access state; not much used.
12112              case GSI_PROC_TYPE:
12113              -- Retrieves implver information; surely not used.
12114              case GSI_GET_HWRPB:
12115              -- Grabs a copy of the HWRPB; surely not used.
12116           */
12117         }
12118         return ret;
12119 #endif
12120 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12121     /* Alpha specific */
12122     case TARGET_NR_osf_setsysinfo:
12123         ret = -TARGET_EOPNOTSUPP;
12124         switch (arg1) {
12125           case TARGET_SSI_IEEE_FP_CONTROL:
12126             {
12127                 uint64_t swcr, fpcr;
12128 
12129                 if (get_user_u64 (swcr, arg2)) {
12130                     return -TARGET_EFAULT;
12131                 }
12132 
12133                 /*
12134                  * The kernel calls swcr_update_status to update the
12135                  * status bits from the fpcr at every point that it
12136                  * could be queried.  Therefore, we store the status
12137                  * bits only in FPCR.
12138                  */
12139                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12140 
12141                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12142                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12143                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12144                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12145                 ret = 0;
12146             }
12147             break;
12148 
12149           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12150             {
12151                 uint64_t exc, fpcr, fex;
12152 
12153                 if (get_user_u64(exc, arg2)) {
12154                     return -TARGET_EFAULT;
12155                 }
12156                 exc &= SWCR_STATUS_MASK;
12157                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12158 
12159                 /* Old exceptions are not signaled.  */
12160                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12161                 fex = exc & ~fex;
12162                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12163                 fex &= (cpu_env)->swcr;
12164 
12165                 /* Update the hardware fpcr.  */
12166                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12167                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12168 
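                      /*
                       * fex now holds the newly raised exceptions that are also
                       * enabled for trapping in the SWCR; deliver SIGFPE for
                       * them.  The later tests below override si_code, so
                       * invalid operation takes the highest priority.
                       */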
12169                 if (fex) {
12170                     int si_code = TARGET_FPE_FLTUNK;
12171                     target_siginfo_t info;
12172 
12173                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12174                         si_code = TARGET_FPE_FLTUND;
12175                     }
12176                     if (fex & SWCR_TRAP_ENABLE_INE) {
12177                         si_code = TARGET_FPE_FLTRES;
12178                     }
12179                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12180                         si_code = TARGET_FPE_FLTUND;
12181                     }
12182                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12183                         si_code = TARGET_FPE_FLTOVF;
12184                     }
12185                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12186                         si_code = TARGET_FPE_FLTDIV;
12187                     }
12188                     if (fex & SWCR_TRAP_ENABLE_INV) {
12189                         si_code = TARGET_FPE_FLTINV;
12190                     }
12191 
12192                     info.si_signo = SIGFPE;
12193                     info.si_errno = 0;
12194                     info.si_code = si_code;
12195                     info._sifields._sigfault._addr = (cpu_env)->pc;
12196                     queue_signal(cpu_env, info.si_signo,
12197                                  QEMU_SI_FAULT, &info);
12198                 }
12199                 ret = 0;
12200             }
12201             break;
12202 
12203           /* case SSI_NVPAIRS:
12204              -- Used with SSIN_UACPROC to enable unaligned accesses.
12205              case SSI_IEEE_STATE_AT_SIGNAL:
12206              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12207              -- Not implemented in linux kernel
12208           */
12209         }
12210         return ret;
12211 #endif
12212 #ifdef TARGET_NR_osf_sigprocmask
12213     /* Alpha specific.  */
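          /*
           * Unlike the Linux syscall, the OSF sigprocmask returns the old
           * signal mask as the syscall return value instead of writing it
           * through a pointer, hence oldset is converted into ret below.
           */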
12214     case TARGET_NR_osf_sigprocmask:
12215         {
12216             abi_ulong mask;
12217             int how;
12218             sigset_t set, oldset;
12219 
12220             switch (arg1) {
12221             case TARGET_SIG_BLOCK:
12222                 how = SIG_BLOCK;
12223                 break;
12224             case TARGET_SIG_UNBLOCK:
12225                 how = SIG_UNBLOCK;
12226                 break;
12227             case TARGET_SIG_SETMASK:
12228                 how = SIG_SETMASK;
12229                 break;
12230             default:
12231                 return -TARGET_EINVAL;
12232             }
12233             mask = arg2;
12234             target_to_host_old_sigset(&set, &mask);
12235             ret = do_sigprocmask(how, &set, &oldset);
12236             if (!ret) {
12237                 host_to_target_old_sigset(&mask, &oldset);
12238                 ret = mask;
12239             }
12240         }
12241         return ret;
12242 #endif
12243 
12244 #ifdef TARGET_NR_getgid32
12245     case TARGET_NR_getgid32:
12246         return get_errno(getgid());
12247 #endif
12248 #ifdef TARGET_NR_geteuid32
12249     case TARGET_NR_geteuid32:
12250         return get_errno(geteuid());
12251 #endif
12252 #ifdef TARGET_NR_getegid32
12253     case TARGET_NR_getegid32:
12254         return get_errno(getegid());
12255 #endif
12256 #ifdef TARGET_NR_setreuid32
12257     case TARGET_NR_setreuid32:
12258         return get_errno(setreuid(arg1, arg2));
12259 #endif
12260 #ifdef TARGET_NR_setregid32
12261     case TARGET_NR_setregid32:
12262         return get_errno(setregid(arg1, arg2));
12263 #endif
12264 #ifdef TARGET_NR_getgroups32
12265     case TARGET_NR_getgroups32:
12266         { /* the same code as for TARGET_NR_getgroups */
12267             int gidsetsize = arg1;
12268             uint32_t *target_grouplist;
12269             g_autofree gid_t *grouplist = NULL;
12270             int i;
12271 
12272             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12273                 return -TARGET_EINVAL;
12274             }
12275             if (gidsetsize > 0) {
12276                 grouplist = g_try_new(gid_t, gidsetsize);
12277                 if (!grouplist) {
12278                     return -TARGET_ENOMEM;
12279                 }
12280             }
12281             ret = get_errno(getgroups(gidsetsize, grouplist));
12282             if (!is_error(ret) && gidsetsize > 0) {
12283                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12284                                              gidsetsize * 4, 0);
12285                 if (!target_grouplist) {
12286                     return -TARGET_EFAULT;
12287                 }
12288                 for (i = 0; i < ret; i++) {
12289                     target_grouplist[i] = tswap32(grouplist[i]);
12290                 }
12291                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12292             }
12293             return ret;
12294         }
12295 #endif
12296 #ifdef TARGET_NR_setgroups32
12297     case TARGET_NR_setgroups32:
12298         { /* the same code as for TARGET_NR_setgroups */
12299             int gidsetsize = arg1;
12300             uint32_t *target_grouplist;
12301             g_autofree gid_t *grouplist = NULL;
12302             int i;
12303 
12304             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12305                 return -TARGET_EINVAL;
12306             }
12307             if (gidsetsize > 0) {
12308                 grouplist = g_try_new(gid_t, gidsetsize);
12309                 if (!grouplist) {
12310                     return -TARGET_ENOMEM;
12311                 }
12312                 target_grouplist = lock_user(VERIFY_READ, arg2,
12313                                              gidsetsize * 4, 1);
12314                 if (!target_grouplist) {
12315                     return -TARGET_EFAULT;
12316                 }
12317                 for (i = 0; i < gidsetsize; i++) {
12318                     grouplist[i] = tswap32(target_grouplist[i]);
12319                 }
12320                 unlock_user(target_grouplist, arg2, 0);
12321             }
12322             return get_errno(setgroups(gidsetsize, grouplist));
12323         }
12324 #endif
12325 #ifdef TARGET_NR_fchown32
12326     case TARGET_NR_fchown32:
12327         return get_errno(fchown(arg1, arg2, arg3));
12328 #endif
12329 #ifdef TARGET_NR_setresuid32
12330     case TARGET_NR_setresuid32:
12331         return get_errno(sys_setresuid(arg1, arg2, arg3));
12332 #endif
12333 #ifdef TARGET_NR_getresuid32
12334     case TARGET_NR_getresuid32:
12335         {
12336             uid_t ruid, euid, suid;
12337             ret = get_errno(getresuid(&ruid, &euid, &suid));
12338             if (!is_error(ret)) {
12339                 if (put_user_u32(ruid, arg1)
12340                     || put_user_u32(euid, arg2)
12341                     || put_user_u32(suid, arg3))
12342                     return -TARGET_EFAULT;
12343             }
12344         }
12345         return ret;
12346 #endif
12347 #ifdef TARGET_NR_setresgid32
12348     case TARGET_NR_setresgid32:
12349         return get_errno(sys_setresgid(arg1, arg2, arg3));
12350 #endif
12351 #ifdef TARGET_NR_getresgid32
12352     case TARGET_NR_getresgid32:
12353         {
12354             gid_t rgid, egid, sgid;
12355             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12356             if (!is_error(ret)) {
12357                 if (put_user_u32(rgid, arg1)
12358                     || put_user_u32(egid, arg2)
12359                     || put_user_u32(sgid, arg3))
12360                     return -TARGET_EFAULT;
12361             }
12362         }
12363         return ret;
12364 #endif
12365 #ifdef TARGET_NR_chown32
12366     case TARGET_NR_chown32:
12367         if (!(p = lock_user_string(arg1)))
12368             return -TARGET_EFAULT;
12369         ret = get_errno(chown(p, arg2, arg3));
12370         unlock_user(p, arg1, 0);
12371         return ret;
12372 #endif
12373 #ifdef TARGET_NR_setuid32
12374     case TARGET_NR_setuid32:
12375         return get_errno(sys_setuid(arg1));
12376 #endif
12377 #ifdef TARGET_NR_setgid32
12378     case TARGET_NR_setgid32:
12379         return get_errno(sys_setgid(arg1));
12380 #endif
12381 #ifdef TARGET_NR_setfsuid32
12382     case TARGET_NR_setfsuid32:
12383         return get_errno(setfsuid(arg1));
12384 #endif
12385 #ifdef TARGET_NR_setfsgid32
12386     case TARGET_NR_setfsgid32:
12387         return get_errno(setfsgid(arg1));
12388 #endif
12389 #ifdef TARGET_NR_mincore
12390     case TARGET_NR_mincore:
12391         {
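                  /*
                   * arg1/arg2 describe the address range to query; arg3 points
                   * to the output vector, one byte of residency information
                   * per page.
                   */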
12392             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12393             if (!a) {
12394                 return -TARGET_ENOMEM;
12395             }
12396             p = lock_user_string(arg3);
12397             if (!p) {
12398                 ret = -TARGET_EFAULT;
12399             } else {
12400                 ret = get_errno(mincore(a, arg2, p));
12401                 unlock_user(p, arg3, ret);
12402             }
12403             unlock_user(a, arg1, 0);
12404         }
12405         return ret;
12406 #endif
12407 #ifdef TARGET_NR_arm_fadvise64_64
12408     case TARGET_NR_arm_fadvise64_64:
12409         /* arm_fadvise64_64 looks like fadvise64_64 but
12410          * with different argument order: fd, advice, offset, len
12411          * rather than the usual fd, offset, len, advice.
12412          * Note that offset and len are both 64-bit so appear as
12413          * pairs of 32-bit registers.
12414          */
12415         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12416                             target_offset64(arg5, arg6), arg2);
12417         return -host_to_target_errno(ret);
12418 #endif
12419 
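      /*
       * On 32-bit ABIs, 64-bit syscall arguments are passed in pairs of
       * 32-bit registers.  Some ABIs require such a pair to start in an
       * even-numbered register; regpairs_aligned() detects that case so the
       * remaining arguments can be shifted into place.
       */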
12420 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12421 
12422 #ifdef TARGET_NR_fadvise64_64
12423     case TARGET_NR_fadvise64_64:
12424 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12425         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12426         ret = arg2;
12427         arg2 = arg3;
12428         arg3 = arg4;
12429         arg4 = arg5;
12430         arg5 = arg6;
12431         arg6 = ret;
12432 #else
12433         /* 6 args: fd, offset (high, low), len (high, low), advice */
12434         if (regpairs_aligned(cpu_env, num)) {
12435             /* offset is in (3,4), len in (5,6) and advice in 7 */
12436             arg2 = arg3;
12437             arg3 = arg4;
12438             arg4 = arg5;
12439             arg5 = arg6;
12440             arg6 = arg7;
12441         }
12442 #endif
12443         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12444                             target_offset64(arg4, arg5), arg6);
12445         return -host_to_target_errno(ret);
12446 #endif
12447 
12448 #ifdef TARGET_NR_fadvise64
12449     case TARGET_NR_fadvise64:
12450         /* 5 args: fd, offset (high, low), len, advice */
12451         if (regpairs_aligned(cpu_env, num)) {
12452             /* offset is in (3,4), len in 5 and advice in 6 */
12453             arg2 = arg3;
12454             arg3 = arg4;
12455             arg4 = arg5;
12456             arg5 = arg6;
12457         }
12458         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12459         return -host_to_target_errno(ret);
12460 #endif
12461 
12462 #else /* not a 32-bit ABI */
12463 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12464 #ifdef TARGET_NR_fadvise64_64
12465     case TARGET_NR_fadvise64_64:
12466 #endif
12467 #ifdef TARGET_NR_fadvise64
12468     case TARGET_NR_fadvise64:
12469 #endif
12470 #ifdef TARGET_S390X
12471         switch (arg4) {
12472         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12473         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12474         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12475         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12476         default: break;
12477         }
12478 #endif
12479         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12480 #endif
12481 #endif /* end of 64-bit ABI fadvise handling */
12482 
12483 #ifdef TARGET_NR_madvise
12484     case TARGET_NR_madvise:
12485         return target_madvise(arg1, arg2, arg3);
12486 #endif
12487 #ifdef TARGET_NR_fcntl64
12488     case TARGET_NR_fcntl64:
12489     {
12490         int cmd;
12491         struct flock64 fl;
12492         from_flock64_fn *copyfrom = copy_from_user_flock64;
12493         to_flock64_fn *copyto = copy_to_user_flock64;
12494 
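      /*
       * The old ARM OABI aligns 64-bit members only to 4 bytes, so its
       * struct flock64 layout differs from EABI; old-ABI processes must use
       * the OABI copy helpers.
       */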
12495 #ifdef TARGET_ARM
12496         if (!cpu_env->eabi) {
12497             copyfrom = copy_from_user_oabi_flock64;
12498             copyto = copy_to_user_oabi_flock64;
12499         }
12500 #endif
12501 
12502         cmd = target_to_host_fcntl_cmd(arg2);
12503         if (cmd == -TARGET_EINVAL) {
12504             return cmd;
12505         }
12506 
12507         switch (arg2) {
12508         case TARGET_F_GETLK64:
12509             ret = copyfrom(&fl, arg3);
12510             if (ret) {
12511                 break;
12512             }
12513             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12514             if (ret == 0) {
12515                 ret = copyto(arg3, &fl);
12516             }
12517             break;
12518 
12519         case TARGET_F_SETLK64:
12520         case TARGET_F_SETLKW64:
12521             ret = copyfrom(&fl, arg3);
12522             if (ret) {
12523                 break;
12524             }
12525             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12526             break;
12527         default:
12528             ret = do_fcntl(arg1, arg2, arg3);
12529             break;
12530         }
12531         return ret;
12532     }
12533 #endif
12534 #ifdef TARGET_NR_cacheflush
12535     case TARGET_NR_cacheflush:
12536         /* self-modifying code is handled automatically, so nothing needed */
12537         return 0;
12538 #endif
12539 #ifdef TARGET_NR_getpagesize
12540     case TARGET_NR_getpagesize:
12541         return TARGET_PAGE_SIZE;
12542 #endif
12543     case TARGET_NR_gettid:
12544         return get_errno(sys_gettid());
12545 #ifdef TARGET_NR_readahead
12546     case TARGET_NR_readahead:
12547 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12548         if (regpairs_aligned(cpu_env, num)) {
12549             arg2 = arg3;
12550             arg3 = arg4;
12551             arg4 = arg5;
12552         }
12553         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12554 #else
12555         ret = get_errno(readahead(arg1, arg2, arg3));
12556 #endif
12557         return ret;
12558 #endif
12559 #ifdef CONFIG_ATTR
12560 #ifdef TARGET_NR_setxattr
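          /*
           * For the list/get flavours a zero buffer pointer is a valid size
           * query, so the guest buffer is only locked when one was supplied.
           */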
12561     case TARGET_NR_listxattr:
12562     case TARGET_NR_llistxattr:
12563     {
12564         void *p, *b = 0;
12565         if (arg2) {
12566             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12567             if (!b) {
12568                 return -TARGET_EFAULT;
12569             }
12570         }
12571         p = lock_user_string(arg1);
12572         if (p) {
12573             if (num == TARGET_NR_listxattr) {
12574                 ret = get_errno(listxattr(p, b, arg3));
12575             } else {
12576                 ret = get_errno(llistxattr(p, b, arg3));
12577             }
12578         } else {
12579             ret = -TARGET_EFAULT;
12580         }
12581         unlock_user(p, arg1, 0);
12582         unlock_user(b, arg2, arg3);
12583         return ret;
12584     }
12585     case TARGET_NR_flistxattr:
12586     {
12587         void *b = 0;
12588         if (arg2) {
12589             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12590             if (!b) {
12591                 return -TARGET_EFAULT;
12592             }
12593         }
12594         ret = get_errno(flistxattr(arg1, b, arg3));
12595         unlock_user(b, arg2, arg3);
12596         return ret;
12597     }
12598     case TARGET_NR_setxattr:
12599     case TARGET_NR_lsetxattr:
12600         {
12601             void *p, *n, *v = 0;
12602             if (arg3) {
12603                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12604                 if (!v) {
12605                     return -TARGET_EFAULT;
12606                 }
12607             }
12608             p = lock_user_string(arg1);
12609             n = lock_user_string(arg2);
12610             if (p && n) {
12611                 if (num == TARGET_NR_setxattr) {
12612                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12613                 } else {
12614                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12615                 }
12616             } else {
12617                 ret = -TARGET_EFAULT;
12618             }
12619             unlock_user(p, arg1, 0);
12620             unlock_user(n, arg2, 0);
12621             unlock_user(v, arg3, 0);
12622         }
12623         return ret;
12624     case TARGET_NR_fsetxattr:
12625         {
12626             void *n, *v = 0;
12627             if (arg3) {
12628                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12629                 if (!v) {
12630                     return -TARGET_EFAULT;
12631                 }
12632             }
12633             n = lock_user_string(arg2);
12634             if (n) {
12635                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12636             } else {
12637                 ret = -TARGET_EFAULT;
12638             }
12639             unlock_user(n, arg2, 0);
12640             unlock_user(v, arg3, 0);
12641         }
12642         return ret;
12643     case TARGET_NR_getxattr:
12644     case TARGET_NR_lgetxattr:
12645         {
12646             void *p, *n, *v = 0;
12647             if (arg3) {
12648                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12649                 if (!v) {
12650                     return -TARGET_EFAULT;
12651                 }
12652             }
12653             p = lock_user_string(arg1);
12654             n = lock_user_string(arg2);
12655             if (p && n) {
12656                 if (num == TARGET_NR_getxattr) {
12657                     ret = get_errno(getxattr(p, n, v, arg4));
12658                 } else {
12659                     ret = get_errno(lgetxattr(p, n, v, arg4));
12660                 }
12661             } else {
12662                 ret = -TARGET_EFAULT;
12663             }
12664             unlock_user(p, arg1, 0);
12665             unlock_user(n, arg2, 0);
12666             unlock_user(v, arg3, arg4);
12667         }
12668         return ret;
12669     case TARGET_NR_fgetxattr:
12670         {
12671             void *n, *v = 0;
12672             if (arg3) {
12673                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12674                 if (!v) {
12675                     return -TARGET_EFAULT;
12676                 }
12677             }
12678             n = lock_user_string(arg2);
12679             if (n) {
12680                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12681             } else {
12682                 ret = -TARGET_EFAULT;
12683             }
12684             unlock_user(n, arg2, 0);
12685             unlock_user(v, arg3, arg4);
12686         }
12687         return ret;
12688     case TARGET_NR_removexattr:
12689     case TARGET_NR_lremovexattr:
12690         {
12691             void *p, *n;
12692             p = lock_user_string(arg1);
12693             n = lock_user_string(arg2);
12694             if (p && n) {
12695                 if (num == TARGET_NR_removexattr) {
12696                     ret = get_errno(removexattr(p, n));
12697                 } else {
12698                     ret = get_errno(lremovexattr(p, n));
12699                 }
12700             } else {
12701                 ret = -TARGET_EFAULT;
12702             }
12703             unlock_user(p, arg1, 0);
12704             unlock_user(n, arg2, 0);
12705         }
12706         return ret;
12707     case TARGET_NR_fremovexattr:
12708         {
12709             void *n;
12710             n = lock_user_string(arg2);
12711             if (n) {
12712                 ret = get_errno(fremovexattr(arg1, n));
12713             } else {
12714                 ret = -TARGET_EFAULT;
12715             }
12716             unlock_user(n, arg2, 0);
12717         }
12718         return ret;
12719 #endif
12720 #endif /* CONFIG_ATTR */
12721 #ifdef TARGET_NR_set_thread_area
12722     case TARGET_NR_set_thread_area:
12723 #if defined(TARGET_MIPS)
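            /*
             * MIPS keeps the TLS pointer in the CP0 UserLocal register, which
             * the guest reads with the rdhwr instruction (hardware register 29).
             */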
12724       cpu_env->active_tc.CP0_UserLocal = arg1;
12725       return 0;
12726 #elif defined(TARGET_CRIS)
12727       if (arg1 & 0xff) {
12728           ret = -TARGET_EINVAL;
12729       } else {
12730           cpu_env->pregs[PR_PID] = arg1;
12731           ret = 0;
12732       }
12733       return ret;
12734 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12735       return do_set_thread_area(cpu_env, arg1);
12736 #elif defined(TARGET_M68K)
12737       {
12738           TaskState *ts = cpu->opaque;
12739           ts->tp_value = arg1;
12740           return 0;
12741       }
12742 #else
12743       return -TARGET_ENOSYS;
12744 #endif
12745 #endif
12746 #ifdef TARGET_NR_get_thread_area
12747     case TARGET_NR_get_thread_area:
12748 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12749         return do_get_thread_area(cpu_env, arg1);
12750 #elif defined(TARGET_M68K)
12751         {
12752             TaskState *ts = cpu->opaque;
12753             return ts->tp_value;
12754         }
12755 #else
12756         return -TARGET_ENOSYS;
12757 #endif
12758 #endif
12759 #ifdef TARGET_NR_getdomainname
12760     case TARGET_NR_getdomainname:
12761         return -TARGET_ENOSYS;
12762 #endif
12763 
12764 #ifdef TARGET_NR_clock_settime
12765     case TARGET_NR_clock_settime:
12766     {
12767         struct timespec ts;
12768 
12769         ret = target_to_host_timespec(&ts, arg2);
12770         if (!is_error(ret)) {
12771             ret = get_errno(clock_settime(arg1, &ts));
12772         }
12773         return ret;
12774     }
12775 #endif
12776 #ifdef TARGET_NR_clock_settime64
12777     case TARGET_NR_clock_settime64:
12778     {
12779         struct timespec ts;
12780 
12781         ret = target_to_host_timespec64(&ts, arg2);
12782         if (!is_error(ret)) {
12783             ret = get_errno(clock_settime(arg1, &ts));
12784         }
12785         return ret;
12786     }
12787 #endif
12788 #ifdef TARGET_NR_clock_gettime
12789     case TARGET_NR_clock_gettime:
12790     {
12791         struct timespec ts;
12792         ret = get_errno(clock_gettime(arg1, &ts));
12793         if (!is_error(ret)) {
12794             ret = host_to_target_timespec(arg2, &ts);
12795         }
12796         return ret;
12797     }
12798 #endif
12799 #ifdef TARGET_NR_clock_gettime64
12800     case TARGET_NR_clock_gettime64:
12801     {
12802         struct timespec ts;
12803         ret = get_errno(clock_gettime(arg1, &ts));
12804         if (!is_error(ret)) {
12805             ret = host_to_target_timespec64(arg2, &ts);
12806         }
12807         return ret;
12808     }
12809 #endif
12810 #ifdef TARGET_NR_clock_getres
12811     case TARGET_NR_clock_getres:
12812     {
12813         struct timespec ts;
12814         ret = get_errno(clock_getres(arg1, &ts));
12815         if (!is_error(ret)) {
12816             host_to_target_timespec(arg2, &ts);
12817         }
12818         return ret;
12819     }
12820 #endif
12821 #ifdef TARGET_NR_clock_getres_time64
12822     case TARGET_NR_clock_getres_time64:
12823     {
12824         struct timespec ts;
12825         ret = get_errno(clock_getres(arg1, &ts));
12826         if (!is_error(ret)) {
12827             host_to_target_timespec64(arg2, &ts);
12828         }
12829         return ret;
12830     }
12831 #endif
12832 #ifdef TARGET_NR_clock_nanosleep
12833     case TARGET_NR_clock_nanosleep:
12834     {
12835         struct timespec ts;
12836         if (target_to_host_timespec(&ts, arg3)) {
12837             return -TARGET_EFAULT;
12838         }
12839         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12840                                              &ts, arg4 ? &ts : NULL));
12841         /*
12842          * If the call is interrupted by a signal handler, it fails with
12843          * -TARGET_EINTR.  If arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12844          * the remaining unslept time is returned in arg4.
12845          */
12846         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12847             host_to_target_timespec(arg4, &ts)) {
12848               return -TARGET_EFAULT;
12849         }
12850 
12851         return ret;
12852     }
12853 #endif
12854 #ifdef TARGET_NR_clock_nanosleep_time64
12855     case TARGET_NR_clock_nanosleep_time64:
12856     {
12857         struct timespec ts;
12858 
12859         if (target_to_host_timespec64(&ts, arg3)) {
12860             return -TARGET_EFAULT;
12861         }
12862 
12863         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12864                                              &ts, arg4 ? &ts : NULL));
12865 
12866         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12867             host_to_target_timespec64(arg4, &ts)) {
12868             return -TARGET_EFAULT;
12869         }
12870         return ret;
12871     }
12872 #endif
12873 
12874 #if defined(TARGET_NR_set_tid_address)
12875     case TARGET_NR_set_tid_address:
12876     {
12877         TaskState *ts = cpu->opaque;
12878         ts->child_tidptr = arg1;
12879         /* Don't call the host set_tid_address(); record the pointer and return the tid. */
12880         return get_errno(sys_gettid());
12881     }
12882 #endif
12883 
12884     case TARGET_NR_tkill:
12885         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12886 
12887     case TARGET_NR_tgkill:
12888         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12889                          target_to_host_signal(arg3)));
12890 
12891 #ifdef TARGET_NR_set_robust_list
12892     case TARGET_NR_set_robust_list:
12893     case TARGET_NR_get_robust_list:
12894         /* The ABI for supporting robust futexes has userspace pass
12895          * the kernel a pointer to a linked list which is updated by
12896          * userspace after the syscall; the list is walked by the kernel
12897          * when the thread exits. Since the linked list in QEMU guest
12898          * memory isn't a valid linked list for the host and we have
12899          * no way to reliably intercept the thread-death event, we can't
12900          * support these. Silently return ENOSYS so that guest userspace
12901          * falls back to a non-robust futex implementation (which should
12902          * be OK except in the corner case of the guest crashing while
12903          * holding a mutex that is shared with another process via
12904          * shared memory).
12905          */
12906         return -TARGET_ENOSYS;
12907 #endif
12908 
12909 #if defined(TARGET_NR_utimensat)
12910     case TARGET_NR_utimensat:
12911         {
12912             struct timespec *tsp, ts[2];
12913             if (!arg3) {
12914                 tsp = NULL;
12915             } else {
12916                 if (target_to_host_timespec(ts, arg3)) {
12917                     return -TARGET_EFAULT;
12918                 }
12919                 if (target_to_host_timespec(ts + 1, arg3 +
12920                                             sizeof(struct target_timespec))) {
12921                     return -TARGET_EFAULT;
12922                 }
12923                 tsp = ts;
12924             }
12925             if (!arg2) {
12926                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12927             } else {
12928                 if (!(p = lock_user_string(arg2))) {
12929                     return -TARGET_EFAULT;
12930                 }
12931                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12932                 unlock_user(p, arg2, 0);
12933             }
12934         }
12935         return ret;
12936 #endif
12937 #ifdef TARGET_NR_utimensat_time64
12938     case TARGET_NR_utimensat_time64:
12939         {
12940             struct timespec *tsp, ts[2];
12941             if (!arg3) {
12942                 tsp = NULL;
12943             } else {
12944                 if (target_to_host_timespec64(ts, arg3)) {
12945                     return -TARGET_EFAULT;
12946                 }
12947                 if (target_to_host_timespec64(ts + 1, arg3 +
12948                                      sizeof(struct target__kernel_timespec))) {
12949                     return -TARGET_EFAULT;
12950                 }
12951                 tsp = ts;
12952             }
12953             if (!arg2) {
12954                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12955             } else {
12956                 p = lock_user_string(arg2);
12957                 if (!p) {
12958                     return -TARGET_EFAULT;
12959                 }
12960                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12961                 unlock_user(p, arg2, 0);
12962             }
12963         }
12964         return ret;
12965 #endif
12966 #ifdef TARGET_NR_futex
12967     case TARGET_NR_futex:
12968         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12969 #endif
12970 #ifdef TARGET_NR_futex_time64
12971     case TARGET_NR_futex_time64:
12972         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12973 #endif
12974 #ifdef CONFIG_INOTIFY
12975 #if defined(TARGET_NR_inotify_init)
12976     case TARGET_NR_inotify_init:
12977         ret = get_errno(inotify_init());
12978         if (ret >= 0) {
12979             fd_trans_register(ret, &target_inotify_trans);
12980         }
12981         return ret;
12982 #endif
12983 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12984     case TARGET_NR_inotify_init1:
12985         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12986                                           fcntl_flags_tbl)));
12987         if (ret >= 0) {
12988             fd_trans_register(ret, &target_inotify_trans);
12989         }
12990         return ret;
12991 #endif
12992 #if defined(TARGET_NR_inotify_add_watch)
12993     case TARGET_NR_inotify_add_watch:
12994         p = lock_user_string(arg2);
12995         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12996         unlock_user(p, arg2, 0);
12997         return ret;
12998 #endif
12999 #if defined(TARGET_NR_inotify_rm_watch)
13000     case TARGET_NR_inotify_rm_watch:
13001         return get_errno(inotify_rm_watch(arg1, arg2));
13002 #endif
13003 #endif
13004 
13005 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13006     case TARGET_NR_mq_open:
13007         {
13008             struct mq_attr posix_mq_attr;
13009             struct mq_attr *pposix_mq_attr;
13010             int host_flags;
13011 
13012             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13013             pposix_mq_attr = NULL;
13014             if (arg4) {
13015                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13016                     return -TARGET_EFAULT;
13017                 }
13018                 pposix_mq_attr = &posix_mq_attr;
13019             }
13020             p = lock_user_string(arg1 - 1);
13021             if (!p) {
13022                 return -TARGET_EFAULT;
13023             }
13024             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13025             unlock_user(p, arg1, 0);
13026         }
13027         return ret;
13028 
13029     case TARGET_NR_mq_unlink:
13030         p = lock_user_string(arg1 - 1);
13031         if (!p) {
13032             return -TARGET_EFAULT;
13033         }
13034         ret = get_errno(mq_unlink(p));
13035         unlock_user(p, arg1, 0);
13036         return ret;
13037 
13038 #ifdef TARGET_NR_mq_timedsend
13039     case TARGET_NR_mq_timedsend:
13040         {
13041             struct timespec ts;
13042 
13043             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13044             if (arg5 != 0) {
13045                 if (target_to_host_timespec(&ts, arg5)) {
13046                     return -TARGET_EFAULT;
13047                 }
13048                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13049                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13050                     return -TARGET_EFAULT;
13051                 }
13052             } else {
13053                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13054             }
13055             unlock_user(p, arg2, arg3);
13056         }
13057         return ret;
13058 #endif
13059 #ifdef TARGET_NR_mq_timedsend_time64
13060     case TARGET_NR_mq_timedsend_time64:
13061         {
13062             struct timespec ts;
13063 
13064             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13065             if (arg5 != 0) {
13066                 if (target_to_host_timespec64(&ts, arg5)) {
13067                     return -TARGET_EFAULT;
13068                 }
13069                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13070                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13071                     return -TARGET_EFAULT;
13072                 }
13073             } else {
13074                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13075             }
13076             unlock_user(p, arg2, arg3);
13077         }
13078         return ret;
13079 #endif
13080 
13081 #ifdef TARGET_NR_mq_timedreceive
13082     case TARGET_NR_mq_timedreceive:
13083         {
13084             struct timespec ts;
13085             unsigned int prio;
13086 
13087             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13088             if (arg5 != 0) {
13089                 if (target_to_host_timespec(&ts, arg5)) {
13090                     return -TARGET_EFAULT;
13091                 }
13092                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13093                                                      &prio, &ts));
13094                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13095                     return -TARGET_EFAULT;
13096                 }
13097             } else {
13098                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13099                                                      &prio, NULL));
13100             }
13101             unlock_user(p, arg2, arg3);
13102             if (arg4 != 0)
13103                 put_user_u32(prio, arg4);
13104         }
13105         return ret;
13106 #endif
13107 #ifdef TARGET_NR_mq_timedreceive_time64
13108     case TARGET_NR_mq_timedreceive_time64:
13109         {
13110             struct timespec ts;
13111             unsigned int prio;
13112 
13113             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13114             if (arg5 != 0) {
13115                 if (target_to_host_timespec64(&ts, arg5)) {
13116                     return -TARGET_EFAULT;
13117                 }
13118                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13119                                                      &prio, &ts));
13120                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13121                     return -TARGET_EFAULT;
13122                 }
13123             } else {
13124                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13125                                                      &prio, NULL));
13126             }
13127             unlock_user(p, arg2, arg3);
13128             if (arg4 != 0) {
13129                 put_user_u32(prio, arg4);
13130             }
13131         }
13132         return ret;
13133 #endif
13134 
13135     /* Not implemented for now... */
13136 /*     case TARGET_NR_mq_notify: */
13137 /*         break; */
13138 
13139     case TARGET_NR_mq_getsetattr:
13140         {
13141             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13142             ret = 0;
13143             if (arg2 != 0) {
13144                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13145                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13146                                            &posix_mq_attr_out));
13147             } else if (arg3 != 0) {
13148                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13149             }
13150             if (ret == 0 && arg3 != 0) {
13151                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13152             }
13153         }
13154         return ret;
13155 #endif
13156 
13157 #ifdef CONFIG_SPLICE
13158 #ifdef TARGET_NR_tee
13159     case TARGET_NR_tee:
13160         {
13161             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13162         }
13163         return ret;
13164 #endif
13165 #ifdef TARGET_NR_splice
13166     case TARGET_NR_splice:
13167         {
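                  /*
                   * The in and out offsets are both optional; when present they
                   * are read from guest memory and the updated values are
                   * written back after the call.
                   */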
13168             loff_t loff_in, loff_out;
13169             loff_t *ploff_in = NULL, *ploff_out = NULL;
13170             if (arg2) {
13171                 if (get_user_u64(loff_in, arg2)) {
13172                     return -TARGET_EFAULT;
13173                 }
13174                 ploff_in = &loff_in;
13175             }
13176             if (arg4) {
13177                 if (get_user_u64(loff_out, arg4)) {
13178                     return -TARGET_EFAULT;
13179                 }
13180                 ploff_out = &loff_out;
13181             }
13182             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13183             if (arg2) {
13184                 if (put_user_u64(loff_in, arg2)) {
13185                     return -TARGET_EFAULT;
13186                 }
13187             }
13188             if (arg4) {
13189                 if (put_user_u64(loff_out, arg4)) {
13190                     return -TARGET_EFAULT;
13191                 }
13192             }
13193         }
13194         return ret;
13195 #endif
13196 #ifdef TARGET_NR_vmsplice
13197     case TARGET_NR_vmsplice:
13198         {
13199             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13200             if (vec != NULL) {
13201                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13202                 unlock_iovec(vec, arg2, arg3, 0);
13203             } else {
13204                 ret = -host_to_target_errno(errno);
13205             }
13206         }
13207         return ret;
13208 #endif
13209 #endif /* CONFIG_SPLICE */
13210 #ifdef CONFIG_EVENTFD
13211 #if defined(TARGET_NR_eventfd)
13212     case TARGET_NR_eventfd:
13213         ret = get_errno(eventfd(arg1, 0));
13214         if (ret >= 0) {
13215             fd_trans_register(ret, &target_eventfd_trans);
13216         }
13217         return ret;
13218 #endif
13219 #if defined(TARGET_NR_eventfd2)
13220     case TARGET_NR_eventfd2:
13221     {
13222         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13223         if (arg2 & TARGET_O_NONBLOCK) {
13224             host_flags |= O_NONBLOCK;
13225         }
13226         if (arg2 & TARGET_O_CLOEXEC) {
13227             host_flags |= O_CLOEXEC;
13228         }
13229         ret = get_errno(eventfd(arg1, host_flags));
13230         if (ret >= 0) {
13231             fd_trans_register(ret, &target_eventfd_trans);
13232         }
13233         return ret;
13234     }
13235 #endif
13236 #endif /* CONFIG_EVENTFD  */
13237 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13238     case TARGET_NR_fallocate:
13239 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13240         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13241                                   target_offset64(arg5, arg6)));
13242 #else
13243         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13244 #endif
13245         return ret;
13246 #endif
13247 #if defined(CONFIG_SYNC_FILE_RANGE)
13248 #if defined(TARGET_NR_sync_file_range)
13249     case TARGET_NR_sync_file_range:
13250 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13251 #if defined(TARGET_MIPS)
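              /*
               * MIPS o32 inserts a padding argument so that the 64-bit offset
               * starts in an even register pair: offset is in (arg3, arg4),
               * nbytes in (arg5, arg6) and the flags end up in arg7.
               */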
13252         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13253                                         target_offset64(arg5, arg6), arg7));
13254 #else
13255         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13256                                         target_offset64(arg4, arg5), arg6));
13257 #endif /* !TARGET_MIPS */
13258 #else
13259         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13260 #endif
13261         return ret;
13262 #endif
13263 #if defined(TARGET_NR_sync_file_range2) || \
13264     defined(TARGET_NR_arm_sync_file_range)
13265 #if defined(TARGET_NR_sync_file_range2)
13266     case TARGET_NR_sync_file_range2:
13267 #endif
13268 #if defined(TARGET_NR_arm_sync_file_range)
13269     case TARGET_NR_arm_sync_file_range:
13270 #endif
13271         /* This is like sync_file_range but the arguments are reordered */
13272 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13273         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13274                                         target_offset64(arg5, arg6), arg2));
13275 #else
13276         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13277 #endif
13278         return ret;
13279 #endif
13280 #endif
13281 #if defined(TARGET_NR_signalfd4)
13282     case TARGET_NR_signalfd4:
13283         return do_signalfd4(arg1, arg2, arg4);
13284 #endif
13285 #if defined(TARGET_NR_signalfd)
13286     case TARGET_NR_signalfd:
13287         return do_signalfd4(arg1, arg2, 0);
13288 #endif
13289 #if defined(CONFIG_EPOLL)
13290 #if defined(TARGET_NR_epoll_create)
13291     case TARGET_NR_epoll_create:
13292         return get_errno(epoll_create(arg1));
13293 #endif
13294 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13295     case TARGET_NR_epoll_create1:
13296         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13297 #endif
13298 #if defined(TARGET_NR_epoll_ctl)
13299     case TARGET_NR_epoll_ctl:
13300     {
13301         struct epoll_event ep;
13302         struct epoll_event *epp = 0;
13303         if (arg4) {
13304             if (arg2 != EPOLL_CTL_DEL) {
13305                 struct target_epoll_event *target_ep;
13306                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13307                     return -TARGET_EFAULT;
13308                 }
13309                 ep.events = tswap32(target_ep->events);
13310                 /*
13311                  * The epoll_data_t union is just opaque data to the kernel,
13312                  * so we transfer all 64 bits across and need not worry what
13313                  * actual data type it is.
13314                  */
13315                 ep.data.u64 = tswap64(target_ep->data.u64);
13316                 unlock_user_struct(target_ep, arg4, 0);
13317             }
13318             /*
13319              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13320              * non-null event pointer even though its contents are ignored,
13321              * so pass one whenever the guest supplied a pointer.
13322              */
13323             epp = &ep;
13324         }
13325         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13326     }
13327 #endif
13328 
13329 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13330 #if defined(TARGET_NR_epoll_wait)
13331     case TARGET_NR_epoll_wait:
13332 #endif
13333 #if defined(TARGET_NR_epoll_pwait)
13334     case TARGET_NR_epoll_pwait:
13335 #endif
13336     {
13337         struct target_epoll_event *target_ep;
13338         struct epoll_event *ep;
13339         int epfd = arg1;
13340         int maxevents = arg3;
13341         int timeout = arg4;
13342 
13343         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13344             return -TARGET_EINVAL;
13345         }
13346 
13347         target_ep = lock_user(VERIFY_WRITE, arg2,
13348                               maxevents * sizeof(struct target_epoll_event), 1);
13349         if (!target_ep) {
13350             return -TARGET_EFAULT;
13351         }
13352 
13353         ep = g_try_new(struct epoll_event, maxevents);
13354         if (!ep) {
13355             unlock_user(target_ep, arg2, 0);
13356             return -TARGET_ENOMEM;
13357         }
13358 
13359         switch (num) {
13360 #if defined(TARGET_NR_epoll_pwait)
13361         case TARGET_NR_epoll_pwait:
13362         {
13363             sigset_t *set = NULL;
13364 
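                  /*
                   * arg5/arg6 describe an optional signal mask that is applied
                   * only for the duration of the wait, using the same helpers
                   * as sigsuspend.
                   */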
13365             if (arg5) {
13366                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13367                 if (ret != 0) {
13368                     break;
13369                 }
13370             }
13371 
13372             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13373                                              set, SIGSET_T_SIZE));
13374 
13375             if (set) {
13376                 finish_sigsuspend_mask(ret);
13377             }
13378             break;
13379         }
13380 #endif
13381 #if defined(TARGET_NR_epoll_wait)
13382         case TARGET_NR_epoll_wait:
13383             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13384                                              NULL, 0));
13385             break;
13386 #endif
13387         default:
13388             ret = -TARGET_ENOSYS;
13389         }
13390         if (!is_error(ret)) {
13391             int i;
13392             for (i = 0; i < ret; i++) {
13393                 target_ep[i].events = tswap32(ep[i].events);
13394                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13395             }
13396             unlock_user(target_ep, arg2,
13397                         ret * sizeof(struct target_epoll_event));
13398         } else {
13399             unlock_user(target_ep, arg2, 0);
13400         }
13401         g_free(ep);
13402         return ret;
13403     }
13404 #endif
13405 #endif
13406 #ifdef TARGET_NR_prlimit64
13407     case TARGET_NR_prlimit64:
13408     {
13409         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13410         struct target_rlimit64 *target_rnew, *target_rold;
13411         struct host_rlimit64 rnew, rold, *rnewp = 0;
13412         int resource = target_to_host_resource(arg2);
13413 
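              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
               * forwarded: on the host they would constrain QEMU's own address
               * space rather than just the guest's.  Queries still go through.
               */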
13414         if (arg3 && (resource != RLIMIT_AS &&
13415                      resource != RLIMIT_DATA &&
13416                      resource != RLIMIT_STACK)) {
13417             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13418                 return -TARGET_EFAULT;
13419             }
13420             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13421             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13422             unlock_user_struct(target_rnew, arg3, 0);
13423             rnewp = &rnew;
13424         }
13425 
13426         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13427         if (!is_error(ret) && arg4) {
13428             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13429                 return -TARGET_EFAULT;
13430             }
13431             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13432             __put_user(rold.rlim_max, &target_rold->rlim_max);
13433             unlock_user_struct(target_rold, arg4, 1);
13434         }
13435         return ret;
13436     }
13437 #endif
13438 #ifdef TARGET_NR_gethostname
13439     case TARGET_NR_gethostname:
13440     {
13441         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13442         if (name) {
13443             ret = get_errno(gethostname(name, arg2));
13444             unlock_user(name, arg1, arg2);
13445         } else {
13446             ret = -TARGET_EFAULT;
13447         }
13448         return ret;
13449     }
13450 #endif
13451 #ifdef TARGET_NR_atomic_cmpxchg_32
13452     case TARGET_NR_atomic_cmpxchg_32:
13453     {
13454         /* should use start_exclusive from main.c */
13455         abi_ulong mem_value;
13456         if (get_user_u32(mem_value, arg6)) {
13457             target_siginfo_t info;
13458             info.si_signo = SIGSEGV;
13459             info.si_errno = 0;
13460             info.si_code = TARGET_SEGV_MAPERR;
13461             info._sifields._sigfault._addr = arg6;
13462             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13463             ret = 0xdeadbeef;
13464 
13465         }
13466         if (mem_value == arg2)
13467             put_user_u32(arg1, arg6);
13468         return mem_value;
13469     }
13470 #endif
13471 #ifdef TARGET_NR_atomic_barrier
13472     case TARGET_NR_atomic_barrier:
13473         /* Like the kernel implementation and the QEMU ARM barrier,
13474            this can safely be treated as a no-op. */
13475         return 0;
13476 #endif
13477 
13478 #ifdef TARGET_NR_timer_create
13479     case TARGET_NR_timer_create:
13480     {
13481         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13482 
13483         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13484 
13485         int clkid = arg1;
13486         int timer_index = next_free_host_timer();
13487 
13488         if (timer_index < 0) {
13489             ret = -TARGET_EAGAIN;
13490         } else {
13491             timer_t *phtimer = g_posix_timers + timer_index;
13492 
13493             if (arg2) {
13494                 phost_sevp = &host_sevp;
13495                 ret = target_to_host_sigevent(phost_sevp, arg2);
13496                 if (ret != 0) {
13497                     free_host_timer_slot(timer_index);
13498                     return ret;
13499                 }
13500             }
13501 
13502             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13503             if (ret) {
13504                 free_host_timer_slot(timer_index);
13505             } else {
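                      /*
                       * The timer id handed back to the guest is the slot index
                       * tagged with TIMER_MAGIC; get_timer_id() validates and
                       * strips the tag on later timer_* calls.
                       */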
13506                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13507                     timer_delete(*phtimer);
13508                     free_host_timer_slot(timer_index);
13509                     return -TARGET_EFAULT;
13510                 }
13511             }
13512         }
13513         return ret;
13514     }
13515 #endif
13516 
13517 #ifdef TARGET_NR_timer_settime
13518     case TARGET_NR_timer_settime:
13519     {
13520         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13521          * struct itimerspec * old_value */
13522         target_timer_t timerid = get_timer_id(arg1);
13523 
13524         if (timerid < 0) {
13525             ret = timerid;
13526         } else if (arg3 == 0) {
13527             ret = -TARGET_EINVAL;
13528         } else {
13529             timer_t htimer = g_posix_timers[timerid];
13530             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13531 
13532             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13533                 return -TARGET_EFAULT;
13534             }
13535             ret = get_errno(
13536                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13537             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13538                 return -TARGET_EFAULT;
13539             }
13540         }
13541         return ret;
13542     }
13543 #endif
13544 
13545 #ifdef TARGET_NR_timer_settime64
13546     case TARGET_NR_timer_settime64:
13547     {
13548         target_timer_t timerid = get_timer_id(arg1);
13549 
13550         if (timerid < 0) {
13551             ret = timerid;
13552         } else if (arg3 == 0) {
13553             ret = -TARGET_EINVAL;
13554         } else {
13555             timer_t htimer = g_posix_timers[timerid];
13556             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13557 
13558             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13559                 return -TARGET_EFAULT;
13560             }
13561             ret = get_errno(
13562                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13563             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13564                 return -TARGET_EFAULT;
13565             }
13566         }
13567         return ret;
13568     }
13569 #endif
13570 
13571 #ifdef TARGET_NR_timer_gettime
13572     case TARGET_NR_timer_gettime:
13573     {
13574         /* args: timer_t timerid, struct itimerspec *curr_value */
13575         target_timer_t timerid = get_timer_id(arg1);
13576 
13577         if (timerid < 0) {
13578             ret = timerid;
13579         } else if (!arg2) {
13580             ret = -TARGET_EFAULT;
13581         } else {
13582             timer_t htimer = g_posix_timers[timerid];
13583             struct itimerspec hspec;
13584             ret = get_errno(timer_gettime(htimer, &hspec));
13585 
13586             if (host_to_target_itimerspec(arg2, &hspec)) {
13587                 ret = -TARGET_EFAULT;
13588             }
13589         }
13590         return ret;
13591     }
13592 #endif
13593 
13594 #ifdef TARGET_NR_timer_gettime64
13595     case TARGET_NR_timer_gettime64:
13596     {
13597         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13598         target_timer_t timerid = get_timer_id(arg1);
13599 
13600         if (timerid < 0) {
13601             ret = timerid;
13602         } else if (!arg2) {
13603             ret = -TARGET_EFAULT;
13604         } else {
13605             timer_t htimer = g_posix_timers[timerid];
13606             struct itimerspec hspec;
13607             ret = get_errno(timer_gettime(htimer, &hspec));
13608 
13609             if (host_to_target_itimerspec64(arg2, &hspec)) {
13610                 ret = -TARGET_EFAULT;
13611             }
13612         }
13613         return ret;
13614     }
13615 #endif
13616 
13617 #ifdef TARGET_NR_timer_getoverrun
13618     case TARGET_NR_timer_getoverrun:
13619     {
13620         /* args: timer_t timerid */
13621         target_timer_t timerid = get_timer_id(arg1);
13622 
13623         if (timerid < 0) {
13624             ret = timerid;
13625         } else {
13626             timer_t htimer = g_posix_timers[timerid];
13627             ret = get_errno(timer_getoverrun(htimer));
13628         }
13629         return ret;
13630     }
13631 #endif
13632 
13633 #ifdef TARGET_NR_timer_delete
13634     case TARGET_NR_timer_delete:
13635     {
13636         /* args: timer_t timerid */
13637         target_timer_t timerid = get_timer_id(arg1);
13638 
13639         if (timerid < 0) {
13640             ret = timerid;
13641         } else {
13642             timer_t htimer = g_posix_timers[timerid];
13643             ret = get_errno(timer_delete(htimer));
13644             free_host_timer_slot(timerid);
13645         }
13646         return ret;
13647     }
13648 #endif
13649 
13650 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13651     case TARGET_NR_timerfd_create:
13652         ret = get_errno(timerfd_create(arg1,
13653                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13654         if (ret >= 0) {
13655             fd_trans_register(ret, &target_timerfd_trans);
13656         }
13657         return ret;
13658 #endif
13659 
13660 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13661     case TARGET_NR_timerfd_gettime:
13662         {
13663             struct itimerspec its_curr;
13664 
13665             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13666 
13667             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13668                 return -TARGET_EFAULT;
13669             }
13670         }
13671         return ret;
13672 #endif
13673 
13674 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13675     case TARGET_NR_timerfd_gettime64:
13676         {
13677             struct itimerspec its_curr;
13678 
13679             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13680 
13681             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13682                 return -TARGET_EFAULT;
13683             }
13684         }
13685         return ret;
13686 #endif
13687 
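    /*
     * The new-value pointer is optional: a null guest pointer is passed
     * through to the host as NULL, and the old value is copied back only
     * when the caller supplied a buffer for it.
     */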
13688 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13689     case TARGET_NR_timerfd_settime:
13690         {
13691             struct itimerspec its_new, its_old, *p_new;
13692 
13693             if (arg3) {
13694                 if (target_to_host_itimerspec(&its_new, arg3)) {
13695                     return -TARGET_EFAULT;
13696                 }
13697                 p_new = &its_new;
13698             } else {
13699                 p_new = NULL;
13700             }
13701 
13702             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13703 
13704             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13705                 return -TARGET_EFAULT;
13706             }
13707         }
13708         return ret;
13709 #endif
13710 
13711 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13712     case TARGET_NR_timerfd_settime64:
13713         {
13714             struct itimerspec its_new, its_old, *p_new;
13715 
13716             if (arg3) {
13717                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13718                     return -TARGET_EFAULT;
13719                 }
13720                 p_new = &its_new;
13721             } else {
13722                 p_new = NULL;
13723             }
13724 
13725             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13726 
13727             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13728                 return -TARGET_EFAULT;
13729             }
13730         }
13731         return ret;
13732 #endif
13733 
13734 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13735     case TARGET_NR_ioprio_get:
13736         return get_errno(ioprio_get(arg1, arg2));
13737 #endif
13738 
13739 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13740     case TARGET_NR_ioprio_set:
13741         return get_errno(ioprio_set(arg1, arg2, arg3));
13742 #endif
13743 
13744 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13745     case TARGET_NR_setns:
13746         return get_errno(setns(arg1, arg2));
13747 #endif
13748 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13749     case TARGET_NR_unshare:
13750         return get_errno(unshare(arg1));
13751 #endif
13752 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13753     case TARGET_NR_kcmp:
13754         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13755 #endif
13756 #ifdef TARGET_NR_swapcontext
13757     case TARGET_NR_swapcontext:
13758         /* PowerPC specific.  */
13759         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13760 #endif
13761 #ifdef TARGET_NR_memfd_create
13762     case TARGET_NR_memfd_create:
13763         p = lock_user_string(arg1);
13764         if (!p) {
13765             return -TARGET_EFAULT;
13766         }
13767         ret = get_errno(memfd_create(p, arg2));
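        /*
         * Likely here to drop any stale fd translator still registered
         * for this fd number by an earlier, since-closed descriptor.
         */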
13768         fd_trans_unregister(ret);
13769         unlock_user(p, arg1, 0);
13770         return ret;
13771 #endif
13772 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13773     case TARGET_NR_membarrier:
13774         return get_errno(membarrier(arg1, arg2));
13775 #endif
13776 
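    /*
     * copy_file_range takes optional in/out offset pointers: each is read
     * from guest memory up front, and when the host call copies any bytes
     * the updated offsets are written back to the guest.
     */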
13777 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13778     case TARGET_NR_copy_file_range:
13779         {
13780             loff_t inoff, outoff;
13781             loff_t *pinoff = NULL, *poutoff = NULL;
13782 
13783             if (arg2) {
13784                 if (get_user_u64(inoff, arg2)) {
13785                     return -TARGET_EFAULT;
13786                 }
13787                 pinoff = &inoff;
13788             }
13789             if (arg4) {
13790                 if (get_user_u64(outoff, arg4)) {
13791                     return -TARGET_EFAULT;
13792                 }
13793                 poutoff = &outoff;
13794             }
13795             /* Do not sign-extend the count parameter. */
13796             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13797                                                  (abi_ulong)arg5, arg6));
13798             if (!is_error(ret) && ret > 0) {
13799                 if (arg2) {
13800                     if (put_user_u64(inoff, arg2)) {
13801                         return -TARGET_EFAULT;
13802                     }
13803                 }
13804                 if (arg4) {
13805                     if (put_user_u64(outoff, arg4)) {
13806                         return -TARGET_EFAULT;
13807                     }
13808                 }
13809             }
13810         }
13811         return ret;
13812 #endif
13813 
13814 #if defined(TARGET_NR_pivot_root)
13815     case TARGET_NR_pivot_root:
13816         {
13817             void *p2;
13818             p = lock_user_string(arg1); /* new_root */
13819             p2 = lock_user_string(arg2); /* put_old */
13820             if (!p || !p2) {
13821                 ret = -TARGET_EFAULT;
13822             } else {
13823                 ret = get_errno(pivot_root(p, p2));
13824             }
13825             unlock_user(p2, arg2, 0);
13826             unlock_user(p, arg1, 0);
13827         }
13828         return ret;
13829 #endif
13830 
13831 #if defined(TARGET_NR_riscv_hwprobe)
13832     case TARGET_NR_riscv_hwprobe:
13833         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13834 #endif
13835 
13836     default:
13837         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13838         return -TARGET_ENOSYS;
13839     }
13840     return ret;
13841 }
13842 
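/*
 * Main syscall entry point for linux-user mode: wraps do_syscall1() with
 * an optional DEBUG_ERESTARTSYS restart test, -strace logging of the call
 * and its return value, and the record_syscall_start()/
 * record_syscall_return() notifications (plugin hooks).
 */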
13843 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13844                     abi_long arg2, abi_long arg3, abi_long arg4,
13845                     abi_long arg5, abi_long arg6, abi_long arg7,
13846                     abi_long arg8)
13847 {
13848     CPUState *cpu = env_cpu(cpu_env);
13849     abi_long ret;
13850 
13851 #ifdef DEBUG_ERESTARTSYS
13852     /* Debug-only code for exercising the syscall-restart code paths
13853      * in the per-architecture cpu main loops: restart every syscall
13854      * the guest makes once before letting it through.
13855      */
13856     {
13857         static bool flag;
13858         flag = !flag;
13859         if (flag) {
13860             return -QEMU_ERESTARTSYS;
13861         }
13862     }
13863 #endif
13864 
13865     record_syscall_start(cpu, num, arg1,
13866                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13867 
13868     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13869         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13870     }
13871 
13872     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13873                       arg5, arg6, arg7, arg8);
13874 
13875     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13876         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13877                           arg3, arg4, arg5, arg6);
13878     }
13879 
13880     record_syscall_return(cpu, num, ret);
13881     return ret;
13882 }
13883