xref: /openbmc/qemu/linux-user/syscall.c (revision e0ddf8ea)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85 
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92 
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130 
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
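/*
 * Illustrative note (not used by the code): a typical glibc pthread_create()
 * issues clone() with roughly
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * i.e. all of CLONE_THREAD_FLAGS plus only optional/ignored bits, so it is
 * classified as thread creation; a plain fork()-style clone() passes none of
 * CLONE_THREAD_FLAGS and only bits covered by CSIGNAL,
 * CLONE_OPTIONAL_FORK_FLAGS and CLONE_IGNORED_FLAGS.
 */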
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
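/*
 * These mirror the kernel's definitions from <linux/msdos_fs.h> (see the
 * commented-out include above): the ioctl reads back a pair of dirents,
 * conventionally the short 8.3 name and the long name, which is why the
 * encoded argument size is doubled.
 */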
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
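/*
 * For example, the _syscall0() invocation above expands (roughly) to
 *   static int sys_gettid(void) { return syscall(__NR_sys_gettid); }
 * with __NR_sys_gettid aliased to the host's __NR_gettid, giving a direct
 * host syscall wrapper that does not depend on libc exposing gettid().
 */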
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
321           loff_t *, res, uint, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363     uint32_t size;
364     uint32_t sched_policy;
365     uint64_t sched_flags;
366     int32_t sched_nice;
367     uint32_t sched_priority;
368     uint64_t sched_runtime;
369     uint64_t sched_deadline;
370     uint64_t sched_period;
371     uint32_t sched_util_min;
372     uint32_t sched_util_max;
373 };
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
425 static const bitmask_transtbl fcntl_flags_tbl[] = {
426   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
427   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
428   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
429   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
430   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
431   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
432   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
433   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
434   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
435   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
436   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
437   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
439 #if defined(O_DIRECT)
440   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
441 #endif
442 #if defined(O_NOATIME)
443   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
444 #endif
445 #if defined(O_CLOEXEC)
446   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
447 #endif
448 #if defined(O_PATH)
449   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
450 #endif
451 #if defined(O_TMPFILE)
452   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
453 #endif
454   /* Don't terminate the list prematurely on 64-bit host+guest.  */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457 #endif
458   { 0, 0, 0, 0 }
459 };
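/*
 * Each bitmask_transtbl entry is { target_mask, target_bits, host_mask,
 * host_bits }: the translation helpers (e.g. target_to_host_bitmask()) AND
 * the flag word with the mask and, when the result equals the bits value,
 * set the corresponding bits on the other side.  That is why O_ACCMODE needs
 * separate rows for O_WRONLY and O_RDWR, while single-bit flags simply repeat
 * the same flag in both columns.
 */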
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470                          const struct timespec times[2], int flags)
471 {
472     errno = ENOSYS;
473     return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
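/*
 * Fallback pattern used here and for renameat2 just below: when the build
 * host lacks the syscall number, the stub fails with errno = ENOSYS, which
 * get_errno() later converts to -TARGET_ENOSYS for the guest (renameat2 can
 * at least fall back to plain renameat() when flags == 0).
 */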
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485                          int newfd, const char *new, int flags)
486 {
487     if (flags == 0) {
488         return renameat(oldfd, old, newfd, new);
489     }
490     errno = ENOSYS;
491     return -1;
492 }
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive at runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not match the one used by the underlying syscall */
512 struct host_rlimit64 {
513     uint64_t rlim_cur;
514     uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
539 static inline void free_host_timer_slot(int id)
540 {
541     qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
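/*
 * Slot handling sketch: next_free_host_timer() claims a slot with an atomic
 * exchange (0 -> 1), so two guest threads cannot be handed the same index,
 * and free_host_timer_slot() releases it with a store-release so earlier
 * updates to the slot are ordered before it becomes claimable again.
 */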
543 #endif
544 
545 static inline int host_to_target_errno(int host_errno)
546 {
547     switch (host_errno) {
548 #define E(X)  case X: return TARGET_##X;
549 #include "errnos.c.inc"
550 #undef E
551     default:
552         return host_errno;
553     }
554 }
555 
556 static inline int target_to_host_errno(int target_errno)
557 {
558     switch (target_errno) {
559 #define E(X)  case TARGET_##X: return X;
560 #include "errnos.c.inc"
561 #undef E
562     default:
563         return target_errno;
564     }
565 }
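/*
 * The E(X) trick above assumes errnos.c.inc is a flat list of E(...)
 * invocations, one per errno value of interest, so the same list expands into
 * both directions of the mapping; anything not listed passes through
 * numerically unchanged via the default case.
 */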
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
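/*
 * check_zeroed_user() mirrors the kernel helper of the same name: when the
 * guest hands us an extensible struct larger than the size QEMU knows about
 * (usize > ksize), the extra tail bytes must all be zero.  It returns 1 if
 * the tail is zero (or absent), 0 if a non-zero byte is found, and
 * -TARGET_EFAULT if the guest memory cannot be read.
 */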
604 
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608     return safe_syscall(__NR_##name); \
609 }
610 
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614     return safe_syscall(__NR_##name, arg1); \
615 }
616 
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622 
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628 
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630     type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635 
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637     type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639     type5 arg5) \
640 { \
641     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643 
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645     type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647     type5 arg5, type6 arg6) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
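/*
 * The safe_syscallN() wrappers funnel through safe_syscall() from
 * user/safe-syscall.h: if a guest signal is (or becomes) pending around a
 * blocking host syscall, the wrapper fails with QEMU_ERESTARTSYS instead of
 * blocking with the signal undelivered, which is why the safe_ variants are
 * used below for every syscall that can block.
 */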
651 
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655               int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658               struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661               int, options, struct rusage *, rusage)
662 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
663               char **, argv, char **, envp, int, flags)
664 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
665     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
666 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
667               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
668 #endif
669 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
670 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
671               struct timespec *, tsp, const sigset_t *, sigmask,
672               size_t, sigsetsize)
673 #endif
674 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
675               int, maxevents, int, timeout, const sigset_t *, sigmask,
676               size_t, sigsetsize)
677 #if defined(__NR_futex)
678 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
679               const struct timespec *,timeout,int *,uaddr2,int,val3)
680 #endif
681 #if defined(__NR_futex_time64)
682 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
683               const struct timespec *,timeout,int *,uaddr2,int,val3)
684 #endif
685 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
686 safe_syscall2(int, kill, pid_t, pid, int, sig)
687 safe_syscall2(int, tkill, int, tid, int, sig)
688 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
689 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
690 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
692               unsigned long, pos_l, unsigned long, pos_h)
693 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
694               unsigned long, pos_l, unsigned long, pos_h)
695 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
696               socklen_t, addrlen)
697 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
698               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
699 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
700               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
701 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
702 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
703 safe_syscall2(int, flock, int, fd, int, operation)
704 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
705 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
706               const struct timespec *, uts, size_t, sigsetsize)
707 #endif
708 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
709               int, flags)
710 #if defined(TARGET_NR_nanosleep)
711 safe_syscall2(int, nanosleep, const struct timespec *, req,
712               struct timespec *, rem)
713 #endif
714 #if defined(TARGET_NR_clock_nanosleep) || \
715     defined(TARGET_NR_clock_nanosleep_time64)
716 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
717               const struct timespec *, req, struct timespec *, rem)
718 #endif
719 #ifdef __NR_ipc
720 #ifdef __s390x__
721 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
722               void *, ptr)
723 #else
724 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
725               void *, ptr, long, fifth)
726 #endif
727 #endif
728 #ifdef __NR_msgsnd
729 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
730               int, flags)
731 #endif
732 #ifdef __NR_msgrcv
733 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
734               long, msgtype, int, flags)
735 #endif
736 #ifdef __NR_semtimedop
737 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
738               unsigned, nsops, const struct timespec *, timeout)
739 #endif
740 #if defined(TARGET_NR_mq_timedsend) || \
741     defined(TARGET_NR_mq_timedsend_time64)
742 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
743               size_t, len, unsigned, prio, const struct timespec *, timeout)
744 #endif
745 #if defined(TARGET_NR_mq_timedreceive) || \
746     defined(TARGET_NR_mq_timedreceive_time64)
747 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
748               size_t, len, unsigned *, prio, const struct timespec *, timeout)
749 #endif
750 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
751 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
752               int, outfd, loff_t *, poutoff, size_t, length,
753               unsigned int, flags)
754 #endif
755 
756 /* We do ioctl like this rather than via safe_syscall3 to preserve the
757  * "third argument might be integer or pointer or not present" behaviour of
758  * the libc function.
759  */
760 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
761 /* Similarly for fcntl. Note that callers must always:
762  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
763  *  use the flock64 struct rather than unsuffixed flock
764  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
765  */
766 #ifdef __NR_fcntl64
767 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
768 #else
769 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
770 #endif
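/*
 * Hypothetical caller following the rule above:
 *     struct flock64 fl64;
 *     ret = safe_fcntl(fd, F_GETLK64, &fl64);
 * i.e. always the 64-bit constant and structure, so the same call works
 * whether it ends up routed to the host's fcntl64 or fcntl syscall.
 */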
771 
772 static inline int host_to_target_sock_type(int host_type)
773 {
774     int target_type;
775 
776     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
777     case SOCK_DGRAM:
778         target_type = TARGET_SOCK_DGRAM;
779         break;
780     case SOCK_STREAM:
781         target_type = TARGET_SOCK_STREAM;
782         break;
783     default:
784         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
785         break;
786     }
787 
788 #if defined(SOCK_CLOEXEC)
789     if (host_type & SOCK_CLOEXEC) {
790         target_type |= TARGET_SOCK_CLOEXEC;
791     }
792 #endif
793 
794 #if defined(SOCK_NONBLOCK)
795     if (host_type & SOCK_NONBLOCK) {
796         target_type |= TARGET_SOCK_NONBLOCK;
797     }
798 #endif
799 
800     return target_type;
801 }
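/*
 * Example (flags are illustrative): a host socket of type
 * SOCK_DGRAM | SOCK_CLOEXEC converts to
 * TARGET_SOCK_DGRAM | TARGET_SOCK_CLOEXEC; unrecognised base types in the
 * low nibble are passed through numerically unchanged.
 */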
802 
803 static abi_ulong target_brk;
804 static abi_ulong brk_page;
805 
806 void target_set_brk(abi_ulong new_brk)
807 {
808     target_brk = new_brk;
809     brk_page = HOST_PAGE_ALIGN(target_brk);
810 }
811 
812 /* do_brk() must return target values and target errnos. */
813 abi_long do_brk(abi_ulong brk_val)
814 {
815     abi_long mapped_addr;
816     abi_ulong new_alloc_size;
817     abi_ulong new_brk, new_host_brk_page;
818 
819     /* brk pointers are always untagged */
820 
821     /* return old brk value if brk_val unchanged or zero */
822     if (!brk_val || brk_val == target_brk) {
823         return target_brk;
824     }
825 
826     new_brk = TARGET_PAGE_ALIGN(brk_val);
827     new_host_brk_page = HOST_PAGE_ALIGN(brk_val);
828 
829     /* brk_val and old target_brk might be on the same page */
830     if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
831         if (brk_val > target_brk) {
832             /* empty remaining bytes in (possibly larger) host page */
833             memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
834         }
835         target_brk = brk_val;
836         return target_brk;
837     }
838 
839     /* Release heap if necessary */
840     if (new_brk < target_brk) {
841         /* empty remaining bytes in (possibly larger) host page */
842         memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);
843 
844         /* free unused host pages and set new brk_page */
845         target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
846         brk_page = new_host_brk_page;
847 
848         target_brk = brk_val;
849         return target_brk;
850     }
851 
852     /* We need to allocate more memory after the brk... Note that
853      * we don't use MAP_FIXED because that will map over the top of
854      * any existing mapping (like the one with the host libc or qemu
855      * itself); instead we treat "mapped but at wrong address" as
856      * a failure and unmap again.
857      */
858     new_alloc_size = new_host_brk_page - brk_page;
859     if (new_alloc_size) {
860         mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
861                                         PROT_READ|PROT_WRITE,
862                                         MAP_ANON|MAP_PRIVATE, 0, 0));
863     } else {
864         mapped_addr = brk_page;
865     }
866 
867     if (mapped_addr == brk_page) {
868         /* Heap contents are initialized to zero, as for anonymous
869          * mapped pages.  Technically the new pages are already
870          * initialized to zero since they *are* anonymous mapped
871          * pages, however we have to take care with the contents that
872          * come from the remaining part of the previous page: it may
873      * contain garbage data due to a previous heap usage (grown
874          * then shrunken).  */
875         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
876 
877         target_brk = brk_val;
878         brk_page = new_host_brk_page;
879         return target_brk;
880     } else if (mapped_addr != -1) {
881         /* Mapped but at wrong address, meaning there wasn't actually
882          * enough space for this brk.
883          */
884         target_munmap(mapped_addr, new_alloc_size);
885         mapped_addr = -1;
886     }
887 
888 #if defined(TARGET_ALPHA)
889     /* We (partially) emulate OSF/1 on Alpha, which requires we
890        return a proper errno, not an unchanged brk value.  */
891     return -TARGET_ENOMEM;
892 #endif
893     /* For everything else, return the previous break. */
894     return target_brk;
895 }
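/*
 * Illustrative sizes for the logic above: with 4 KiB target pages and 64 KiB
 * host pages, a brk() that grows the heap but stays within the current host
 * page needs no new mapping (new_alloc_size == 0); only when
 * new_host_brk_page moves past brk_page does target_mmap() extend the heap,
 * and MAP_FIXED is deliberately avoided so an unexpected address is treated
 * as ENOMEM rather than clobbering an existing mapping.
 */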
896 
897 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
898     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
899 static inline abi_long copy_from_user_fdset(fd_set *fds,
900                                             abi_ulong target_fds_addr,
901                                             int n)
902 {
903     int i, nw, j, k;
904     abi_ulong b, *target_fds;
905 
906     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
907     if (!(target_fds = lock_user(VERIFY_READ,
908                                  target_fds_addr,
909                                  sizeof(abi_ulong) * nw,
910                                  1)))
911         return -TARGET_EFAULT;
912 
913     FD_ZERO(fds);
914     k = 0;
915     for (i = 0; i < nw; i++) {
916         /* grab the abi_ulong */
917         __get_user(b, &target_fds[i]);
918         for (j = 0; j < TARGET_ABI_BITS; j++) {
919             /* check the bit inside the abi_ulong */
920             if ((b >> j) & 1)
921                 FD_SET(k, fds);
922             k++;
923         }
924     }
925 
926     unlock_user(target_fds, target_fds_addr, 0);
927 
928     return 0;
929 }
930 
931 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
932                                                  abi_ulong target_fds_addr,
933                                                  int n)
934 {
935     if (target_fds_addr) {
936         if (copy_from_user_fdset(fds, target_fds_addr, n))
937             return -TARGET_EFAULT;
938         *fds_ptr = fds;
939     } else {
940         *fds_ptr = NULL;
941     }
942     return 0;
943 }
944 
945 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
946                                           const fd_set *fds,
947                                           int n)
948 {
949     int i, nw, j, k;
950     abi_long v;
951     abi_ulong *target_fds;
952 
953     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
954     if (!(target_fds = lock_user(VERIFY_WRITE,
955                                  target_fds_addr,
956                                  sizeof(abi_ulong) * nw,
957                                  0)))
958         return -TARGET_EFAULT;
959 
960     k = 0;
961     for (i = 0; i < nw; i++) {
962         v = 0;
963         for (j = 0; j < TARGET_ABI_BITS; j++) {
964             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
965             k++;
966         }
967         __put_user(v, &target_fds[i]);
968     }
969 
970     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
971 
972     return 0;
973 }
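/*
 * Packing example for the two helpers above: with TARGET_ABI_BITS == 32 and
 * n == 40, nw == DIV_ROUND_UP(40, 32) == 2 abi_ulong words are transferred;
 * guest fd 33 lives in word 1, bit 1, and the same k counter walks fds 0..63
 * in both directions of the conversion.
 */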
974 #endif
975 
976 #if defined(__alpha__)
977 #define HOST_HZ 1024
978 #else
979 #define HOST_HZ 100
980 #endif
981 
982 static inline abi_long host_to_target_clock_t(long ticks)
983 {
984 #if HOST_HZ == TARGET_HZ
985     return ticks;
986 #else
987     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
988 #endif
989 }
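/*
 * E.g. a non-Alpha host (HOST_HZ == 100) emulating a target with
 * TARGET_HZ == 1024 (such as Alpha) scales 250 host ticks to
 * (250 * 1024) / 100 == 2560 target ticks; the int64_t cast keeps the
 * intermediate product from overflowing a 32-bit long.
 */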
990 
991 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
992                                              const struct rusage *rusage)
993 {
994     struct target_rusage *target_rusage;
995 
996     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
997         return -TARGET_EFAULT;
998     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
999     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1000     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1001     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1002     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1003     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1004     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1005     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1006     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1007     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1008     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1009     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1010     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1011     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1012     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1013     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1014     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1015     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1016     unlock_user_struct(target_rusage, target_addr, 1);
1017 
1018     return 0;
1019 }
1020 
1021 #ifdef TARGET_NR_setrlimit
1022 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1023 {
1024     abi_ulong target_rlim_swap;
1025     rlim_t result;
1026 
1027     target_rlim_swap = tswapal(target_rlim);
1028     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1029         return RLIM_INFINITY;
1030 
1031     result = target_rlim_swap;
1032     if (target_rlim_swap != (rlim_t)result)
1033         return RLIM_INFINITY;
1034 
1035     return result;
1036 }
1037 #endif
1038 
1039 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1040 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1041 {
1042     abi_ulong target_rlim_swap;
1043     abi_ulong result;
1044 
1045     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1046         target_rlim_swap = TARGET_RLIM_INFINITY;
1047     else
1048         target_rlim_swap = rlim;
1049     result = tswapal(target_rlim_swap);
1050 
1051     return result;
1052 }
1053 #endif
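/*
 * Both rlimit conversions above clamp rather than truncate: a value that
 * cannot be represented on the other side (including the respective
 * RLIM_INFINITY encodings) becomes infinity, much as the kernel's own compat
 * conversions do.
 */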
1054 
1055 static inline int target_to_host_resource(int code)
1056 {
1057     switch (code) {
1058     case TARGET_RLIMIT_AS:
1059         return RLIMIT_AS;
1060     case TARGET_RLIMIT_CORE:
1061         return RLIMIT_CORE;
1062     case TARGET_RLIMIT_CPU:
1063         return RLIMIT_CPU;
1064     case TARGET_RLIMIT_DATA:
1065         return RLIMIT_DATA;
1066     case TARGET_RLIMIT_FSIZE:
1067         return RLIMIT_FSIZE;
1068     case TARGET_RLIMIT_LOCKS:
1069         return RLIMIT_LOCKS;
1070     case TARGET_RLIMIT_MEMLOCK:
1071         return RLIMIT_MEMLOCK;
1072     case TARGET_RLIMIT_MSGQUEUE:
1073         return RLIMIT_MSGQUEUE;
1074     case TARGET_RLIMIT_NICE:
1075         return RLIMIT_NICE;
1076     case TARGET_RLIMIT_NOFILE:
1077         return RLIMIT_NOFILE;
1078     case TARGET_RLIMIT_NPROC:
1079         return RLIMIT_NPROC;
1080     case TARGET_RLIMIT_RSS:
1081         return RLIMIT_RSS;
1082     case TARGET_RLIMIT_RTPRIO:
1083         return RLIMIT_RTPRIO;
1084 #ifdef RLIMIT_RTTIME
1085     case TARGET_RLIMIT_RTTIME:
1086         return RLIMIT_RTTIME;
1087 #endif
1088     case TARGET_RLIMIT_SIGPENDING:
1089         return RLIMIT_SIGPENDING;
1090     case TARGET_RLIMIT_STACK:
1091         return RLIMIT_STACK;
1092     default:
1093         return code;
1094     }
1095 }
1096 
1097 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1098                                               abi_ulong target_tv_addr)
1099 {
1100     struct target_timeval *target_tv;
1101 
1102     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1103         return -TARGET_EFAULT;
1104     }
1105 
1106     __get_user(tv->tv_sec, &target_tv->tv_sec);
1107     __get_user(tv->tv_usec, &target_tv->tv_usec);
1108 
1109     unlock_user_struct(target_tv, target_tv_addr, 0);
1110 
1111     return 0;
1112 }
1113 
1114 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1115                                             const struct timeval *tv)
1116 {
1117     struct target_timeval *target_tv;
1118 
1119     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1120         return -TARGET_EFAULT;
1121     }
1122 
1123     __put_user(tv->tv_sec, &target_tv->tv_sec);
1124     __put_user(tv->tv_usec, &target_tv->tv_usec);
1125 
1126     unlock_user_struct(target_tv, target_tv_addr, 1);
1127 
1128     return 0;
1129 }
1130 
1131 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1132 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1133                                                 abi_ulong target_tv_addr)
1134 {
1135     struct target__kernel_sock_timeval *target_tv;
1136 
1137     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1138         return -TARGET_EFAULT;
1139     }
1140 
1141     __get_user(tv->tv_sec, &target_tv->tv_sec);
1142     __get_user(tv->tv_usec, &target_tv->tv_usec);
1143 
1144     unlock_user_struct(target_tv, target_tv_addr, 0);
1145 
1146     return 0;
1147 }
1148 #endif
1149 
1150 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1151                                               const struct timeval *tv)
1152 {
1153     struct target__kernel_sock_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1156         return -TARGET_EFAULT;
1157     }
1158 
1159     __put_user(tv->tv_sec, &target_tv->tv_sec);
1160     __put_user(tv->tv_usec, &target_tv->tv_usec);
1161 
1162     unlock_user_struct(target_tv, target_tv_addr, 1);
1163 
1164     return 0;
1165 }
1166 
1167 #if defined(TARGET_NR_futex) || \
1168     defined(TARGET_NR_rt_sigtimedwait) || \
1169     defined(TARGET_NR_pselect6) || \
1170     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1171     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1172     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1173     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1174     defined(TARGET_NR_timer_settime) || \
1175     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1176 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1177                                                abi_ulong target_addr)
1178 {
1179     struct target_timespec *target_ts;
1180 
1181     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1182         return -TARGET_EFAULT;
1183     }
1184     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1185     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1186     unlock_user_struct(target_ts, target_addr, 0);
1187     return 0;
1188 }
1189 #endif
1190 
1191 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1192     defined(TARGET_NR_timer_settime64) || \
1193     defined(TARGET_NR_mq_timedsend_time64) || \
1194     defined(TARGET_NR_mq_timedreceive_time64) || \
1195     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1196     defined(TARGET_NR_clock_nanosleep_time64) || \
1197     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1198     defined(TARGET_NR_utimensat) || \
1199     defined(TARGET_NR_utimensat_time64) || \
1200     defined(TARGET_NR_semtimedop_time64) || \
1201     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1202 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1203                                                  abi_ulong target_addr)
1204 {
1205     struct target__kernel_timespec *target_ts;
1206 
1207     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1208         return -TARGET_EFAULT;
1209     }
1210     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1211     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1212     /* in 32bit mode, this drops the padding */
1213     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1214     unlock_user_struct(target_ts, target_addr, 0);
1215     return 0;
1216 }
1217 #endif
1218 
1219 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1220                                                struct timespec *host_ts)
1221 {
1222     struct target_timespec *target_ts;
1223 
1224     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1225         return -TARGET_EFAULT;
1226     }
1227     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1228     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1229     unlock_user_struct(target_ts, target_addr, 1);
1230     return 0;
1231 }
1232 
1233 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1234                                                  struct timespec *host_ts)
1235 {
1236     struct target__kernel_timespec *target_ts;
1237 
1238     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1239         return -TARGET_EFAULT;
1240     }
1241     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1242     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1243     unlock_user_struct(target_ts, target_addr, 1);
1244     return 0;
1245 }
1246 
1247 #if defined(TARGET_NR_gettimeofday)
1248 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1249                                              struct timezone *tz)
1250 {
1251     struct target_timezone *target_tz;
1252 
1253     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1254         return -TARGET_EFAULT;
1255     }
1256 
1257     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1258     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1259 
1260     unlock_user_struct(target_tz, target_tz_addr, 1);
1261 
1262     return 0;
1263 }
1264 #endif
1265 
1266 #if defined(TARGET_NR_settimeofday)
1267 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1268                                                abi_ulong target_tz_addr)
1269 {
1270     struct target_timezone *target_tz;
1271 
1272     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1273         return -TARGET_EFAULT;
1274     }
1275 
1276     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1277     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1278 
1279     unlock_user_struct(target_tz, target_tz_addr, 0);
1280 
1281     return 0;
1282 }
1283 #endif
1284 
1285 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1286 #include <mqueue.h>
1287 
1288 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1289                                               abi_ulong target_mq_attr_addr)
1290 {
1291     struct target_mq_attr *target_mq_attr;
1292 
1293     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1294                           target_mq_attr_addr, 1))
1295         return -TARGET_EFAULT;
1296 
1297     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1298     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1299     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1300     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1301 
1302     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1303 
1304     return 0;
1305 }
1306 
1307 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1308                                             const struct mq_attr *attr)
1309 {
1310     struct target_mq_attr *target_mq_attr;
1311 
1312     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1313                           target_mq_attr_addr, 0))
1314         return -TARGET_EFAULT;
1315 
1316     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1317     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1318     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1319     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1320 
1321     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1322 
1323     return 0;
1324 }
1325 #endif
1326 
1327 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1328 /* do_select() must return target values and target errnos. */
1329 static abi_long do_select(int n,
1330                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1331                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1332 {
1333     fd_set rfds, wfds, efds;
1334     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1335     struct timeval tv;
1336     struct timespec ts, *ts_ptr;
1337     abi_long ret;
1338 
1339     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1340     if (ret) {
1341         return ret;
1342     }
1343     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1344     if (ret) {
1345         return ret;
1346     }
1347     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1348     if (ret) {
1349         return ret;
1350     }
1351 
1352     if (target_tv_addr) {
1353         if (copy_from_user_timeval(&tv, target_tv_addr))
1354             return -TARGET_EFAULT;
1355         ts.tv_sec = tv.tv_sec;
1356         ts.tv_nsec = tv.tv_usec * 1000;
1357         ts_ptr = &ts;
1358     } else {
1359         ts_ptr = NULL;
1360     }
1361 
1362     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1363                                   ts_ptr, NULL));
1364 
1365     if (!is_error(ret)) {
1366         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1367             return -TARGET_EFAULT;
1368         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1369             return -TARGET_EFAULT;
1370         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1371             return -TARGET_EFAULT;
1372 
1373         if (target_tv_addr) {
1374             tv.tv_sec = ts.tv_sec;
1375             tv.tv_usec = ts.tv_nsec / 1000;
1376             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1377                 return -TARGET_EFAULT;
1378             }
1379         }
1380     }
1381 
1382     return ret;
1383 }
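/*
 * The guest timeval is round-tripped through a timespec because the host call
 * is pselect6(); the raw Linux syscall updates the timespec with the time
 * remaining, so converting it back preserves select()'s "timeout is updated"
 * behaviour for the guest.
 */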
1384 
1385 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1386 static abi_long do_old_select(abi_ulong arg1)
1387 {
1388     struct target_sel_arg_struct *sel;
1389     abi_ulong inp, outp, exp, tvp;
1390     long nsel;
1391 
1392     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1393         return -TARGET_EFAULT;
1394     }
1395 
1396     nsel = tswapal(sel->n);
1397     inp = tswapal(sel->inp);
1398     outp = tswapal(sel->outp);
1399     exp = tswapal(sel->exp);
1400     tvp = tswapal(sel->tvp);
1401 
1402     unlock_user_struct(sel, arg1, 0);
1403 
1404     return do_select(nsel, inp, outp, exp, tvp);
1405 }
1406 #endif
1407 #endif
1408 
1409 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1410 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1411                             abi_long arg4, abi_long arg5, abi_long arg6,
1412                             bool time64)
1413 {
1414     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1415     fd_set rfds, wfds, efds;
1416     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1417     struct timespec ts, *ts_ptr;
1418     abi_long ret;
1419 
1420     /*
1421      * The 6th arg is actually two args smashed together,
1422      * so we cannot use the C library.
1423      */
1424     struct {
1425         sigset_t *set;
1426         size_t size;
1427     } sig, *sig_ptr;
1428 
1429     abi_ulong arg_sigset, arg_sigsize, *arg7;
1430 
1431     n = arg1;
1432     rfd_addr = arg2;
1433     wfd_addr = arg3;
1434     efd_addr = arg4;
1435     ts_addr = arg5;
1436 
1437     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1438     if (ret) {
1439         return ret;
1440     }
1441     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1442     if (ret) {
1443         return ret;
1444     }
1445     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1446     if (ret) {
1447         return ret;
1448     }
1449 
1450     /*
1451      * This takes a timespec, and not a timeval, so we cannot
1452      * use the do_select() helper ...
1453      */
1454     if (ts_addr) {
1455         if (time64) {
1456             if (target_to_host_timespec64(&ts, ts_addr)) {
1457                 return -TARGET_EFAULT;
1458             }
1459         } else {
1460             if (target_to_host_timespec(&ts, ts_addr)) {
1461                 return -TARGET_EFAULT;
1462             }
1463         }
1464         ts_ptr = &ts;
1465     } else {
1466         ts_ptr = NULL;
1467     }
1468 
1469     /* Extract the two packed args for the sigset */
1470     sig_ptr = NULL;
1471     if (arg6) {
1472         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1473         if (!arg7) {
1474             return -TARGET_EFAULT;
1475         }
1476         arg_sigset = tswapal(arg7[0]);
1477         arg_sigsize = tswapal(arg7[1]);
1478         unlock_user(arg7, arg6, 0);
1479 
1480         if (arg_sigset) {
1481             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1482             if (ret != 0) {
1483                 return ret;
1484             }
1485             sig_ptr = &sig;
1486             sig.size = SIGSET_T_SIZE;
1487         }
1488     }
1489 
1490     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1491                                   ts_ptr, sig_ptr));
1492 
1493     if (sig_ptr) {
1494         finish_sigsuspend_mask(ret);
1495     }
1496 
1497     if (!is_error(ret)) {
1498         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1499             return -TARGET_EFAULT;
1500         }
1501         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1502             return -TARGET_EFAULT;
1503         }
1504         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1505             return -TARGET_EFAULT;
1506         }
1507         if (time64) {
1508             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1509                 return -TARGET_EFAULT;
1510             }
1511         } else {
1512             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1513                 return -TARGET_EFAULT;
1514             }
1515         }
1516     }
1517     return ret;
1518 }
1519 #endif
1520 
1521 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1522     defined(TARGET_NR_ppoll_time64)
1523 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1524                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1525 {
1526     struct target_pollfd *target_pfd;
1527     unsigned int nfds = arg2;
1528     struct pollfd *pfd;
1529     unsigned int i;
1530     abi_long ret;
1531 
1532     pfd = NULL;
1533     target_pfd = NULL;
1534     if (nfds) {
1535         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1536             return -TARGET_EINVAL;
1537         }
1538         target_pfd = lock_user(VERIFY_WRITE, arg1,
1539                                sizeof(struct target_pollfd) * nfds, 1);
1540         if (!target_pfd) {
1541             return -TARGET_EFAULT;
1542         }
1543 
1544         pfd = alloca(sizeof(struct pollfd) * nfds);
1545         for (i = 0; i < nfds; i++) {
1546             pfd[i].fd = tswap32(target_pfd[i].fd);
1547             pfd[i].events = tswap16(target_pfd[i].events);
1548         }
1549     }
1550     if (ppoll) {
1551         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1552         sigset_t *set = NULL;
1553 
1554         if (arg3) {
1555             if (time64) {
1556                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1557                     unlock_user(target_pfd, arg1, 0);
1558                     return -TARGET_EFAULT;
1559                 }
1560             } else {
1561                 if (target_to_host_timespec(timeout_ts, arg3)) {
1562                     unlock_user(target_pfd, arg1, 0);
1563                     return -TARGET_EFAULT;
1564                 }
1565             }
1566         } else {
1567             timeout_ts = NULL;
1568         }
1569 
1570         if (arg4) {
1571             ret = process_sigsuspend_mask(&set, arg4, arg5);
1572             if (ret != 0) {
1573                 unlock_user(target_pfd, arg1, 0);
1574                 return ret;
1575             }
1576         }
1577 
1578         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1579                                    set, SIGSET_T_SIZE));
1580 
1581         if (set) {
1582             finish_sigsuspend_mask(ret);
1583         }
1584         if (!is_error(ret) && arg3) {
1585             if (time64) {
1586                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1587                     return -TARGET_EFAULT;
1588                 }
1589             } else {
1590                 if (host_to_target_timespec(arg3, timeout_ts)) {
1591                     return -TARGET_EFAULT;
1592                 }
1593             }
1594         }
1595     } else {
1596         struct timespec ts, *pts;
1597 
1598         if (arg3 >= 0) {
1599             /* Convert ms to secs, ns */
1600             ts.tv_sec = arg3 / 1000;
1601             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1602             pts = &ts;
1603         } else {
1604             /* A negative poll() timeout means "infinite" */
1605             pts = NULL;
1606         }
1607         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1608     }
1609 
1610     if (!is_error(ret)) {
1611         for (i = 0; i < nfds; i++) {
1612             target_pfd[i].revents = tswap16(pfd[i].revents);
1613         }
1614     }
1615     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1616     return ret;
1617 }
1618 #endif
1619 
1620 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1621                         int flags, int is_pipe2)
1622 {
1623     int host_pipe[2];
1624     abi_long ret;
1625     ret = pipe2(host_pipe, flags);
1626 
1627     if (is_error(ret))
1628         return get_errno(ret);
1629 
1630     /* Several targets have special calling conventions for the original
1631        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
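    /* For those ABIs the read end is returned as the syscall result and the
     * write end is handed back in a second register, as done below. */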
1632     if (!is_pipe2) {
1633 #if defined(TARGET_ALPHA)
1634         cpu_env->ir[IR_A4] = host_pipe[1];
1635         return host_pipe[0];
1636 #elif defined(TARGET_MIPS)
1637         cpu_env->active_tc.gpr[3] = host_pipe[1];
1638         return host_pipe[0];
1639 #elif defined(TARGET_SH4)
1640         cpu_env->gregs[1] = host_pipe[1];
1641         return host_pipe[0];
1642 #elif defined(TARGET_SPARC)
1643         cpu_env->regwptr[1] = host_pipe[1];
1644         return host_pipe[0];
1645 #endif
1646     }
1647 
1648     if (put_user_s32(host_pipe[0], pipedes)
1649         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1650         return -TARGET_EFAULT;
1651     return get_errno(ret);
1652 }
1653 
1654 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1655                                               abi_ulong target_addr,
1656                                               socklen_t len)
1657 {
1658     struct target_ip_mreqn *target_smreqn;
1659 
1660     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1661     if (!target_smreqn)
1662         return -TARGET_EFAULT;
1663     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1664     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1665     if (len == sizeof(struct target_ip_mreqn))
1666         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1667     unlock_user(target_smreqn, target_addr, 0);
1668 
1669     return 0;
1670 }
1671 
1672 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1673                                                abi_ulong target_addr,
1674                                                socklen_t len)
1675 {
1676     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1677     sa_family_t sa_family;
1678     struct target_sockaddr *target_saddr;
1679 
1680     if (fd_trans_target_to_host_addr(fd)) {
1681         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1682     }
1683 
1684     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1685     if (!target_saddr)
1686         return -TARGET_EFAULT;
1687 
1688     sa_family = tswap16(target_saddr->sa_family);
1689 
1690     /* Oops. The caller might send an incomplete sun_path; sun_path
1691      * must be terminated by \0 (see the manual page), but
1692      * unfortunately it is quite common to specify sockaddr_un
1693      * length as "strlen(x->sun_path)" while it should be
1694      * "strlen(...) + 1". We'll fix that here if needed.
1695      * The Linux kernel has a similar workaround.
1696      */
1697 
1698     if (sa_family == AF_UNIX) {
1699         if (len < unix_maxlen && len > 0) {
1700             char *cp = (char *)target_saddr;
1701 
1702             if (cp[len - 1] && !cp[len])
1703                 len++;
1704         }
1705         if (len > unix_maxlen)
1706             len = unix_maxlen;
1707     }
1708 
1709     memcpy(addr, target_saddr, len);
1710     addr->sa_family = sa_family;
1711     if (sa_family == AF_NETLINK) {
1712         struct sockaddr_nl *nladdr;
1713 
1714         nladdr = (struct sockaddr_nl *)addr;
1715         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1716         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1717     } else if (sa_family == AF_PACKET) {
1718         struct target_sockaddr_ll *lladdr;
1719 
1720         lladdr = (struct target_sockaddr_ll *)addr;
1721         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1722         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1723     } else if (sa_family == AF_INET6) {
1724         struct sockaddr_in6 *in6addr;
1725 
1726         in6addr = (struct sockaddr_in6 *)addr;
1727         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1728     }
1729     unlock_user(target_saddr, target_addr, 0);
1730 
1731     return 0;
1732 }
1733 
1734 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1735                                                struct sockaddr *addr,
1736                                                socklen_t len)
1737 {
1738     struct target_sockaddr *target_saddr;
1739 
1740     if (len == 0) {
1741         return 0;
1742     }
1743     assert(addr);
1744 
1745     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1746     if (!target_saddr)
1747         return -TARGET_EFAULT;
1748     memcpy(target_saddr, addr, len);
1749     if (len >= offsetof(struct target_sockaddr, sa_family) +
1750         sizeof(target_saddr->sa_family)) {
1751         target_saddr->sa_family = tswap16(addr->sa_family);
1752     }
1753     if (addr->sa_family == AF_NETLINK &&
1754         len >= sizeof(struct target_sockaddr_nl)) {
1755         struct target_sockaddr_nl *target_nl =
1756                (struct target_sockaddr_nl *)target_saddr;
1757         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1758         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1759     } else if (addr->sa_family == AF_PACKET) {
1760         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1761         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1762         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1763     } else if (addr->sa_family == AF_INET6 &&
1764                len >= sizeof(struct target_sockaddr_in6)) {
1765         struct target_sockaddr_in6 *target_in6 =
1766                (struct target_sockaddr_in6 *)target_saddr;
1767         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1768     }
1769     unlock_user(target_saddr, target_addr, len);
1770 
1771     return 0;
1772 }
1773 
1774 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1775                                            struct target_msghdr *target_msgh)
1776 {
1777     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1778     abi_long msg_controllen;
1779     abi_ulong target_cmsg_addr;
1780     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1781     socklen_t space = 0;
1782 
1783     msg_controllen = tswapal(target_msgh->msg_controllen);
1784     if (msg_controllen < sizeof (struct target_cmsghdr))
1785         goto the_end;
1786     target_cmsg_addr = tswapal(target_msgh->msg_control);
1787     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1788     target_cmsg_start = target_cmsg;
1789     if (!target_cmsg)
1790         return -TARGET_EFAULT;
1791 
1792     while (cmsg && target_cmsg) {
1793         void *data = CMSG_DATA(cmsg);
1794         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1795 
1796         int len = tswapal(target_cmsg->cmsg_len)
1797             - sizeof(struct target_cmsghdr);
1798 
1799         space += CMSG_SPACE(len);
1800         if (space > msgh->msg_controllen) {
1801             space -= CMSG_SPACE(len);
1802             /* This is a QEMU bug, since we allocated the payload
1803              * area ourselves (unlike overflow in host-to-target
1804              * conversion, which is just the guest giving us a buffer
1805              * that's too small). It can't happen for the payload types
1806              * we currently support; if it becomes an issue in future
1807              * we would need to improve our allocation strategy to
1808              * something more intelligent than "twice the size of the
1809              * target buffer we're reading from".
1810              */
1811             qemu_log_mask(LOG_UNIMP,
1812                           ("Unsupported ancillary data %d/%d: "
1813                            "unhandled msg size\n"),
1814                           tswap32(target_cmsg->cmsg_level),
1815                           tswap32(target_cmsg->cmsg_type));
1816             break;
1817         }
1818 
1819         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1820             cmsg->cmsg_level = SOL_SOCKET;
1821         } else {
1822             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1823         }
1824         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1825         cmsg->cmsg_len = CMSG_LEN(len);
1826 
1827         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1828             int *fd = (int *)data;
1829             int *target_fd = (int *)target_data;
1830             int i, numfds = len / sizeof(int);
1831 
1832             for (i = 0; i < numfds; i++) {
1833                 __get_user(fd[i], target_fd + i);
1834             }
1835         } else if (cmsg->cmsg_level == SOL_SOCKET
1836                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1837             struct ucred *cred = (struct ucred *)data;
1838             struct target_ucred *target_cred =
1839                 (struct target_ucred *)target_data;
1840 
1841             __get_user(cred->pid, &target_cred->pid);
1842             __get_user(cred->uid, &target_cred->uid);
1843             __get_user(cred->gid, &target_cred->gid);
1844         } else if (cmsg->cmsg_level == SOL_ALG) {
1845             uint32_t *dst = (uint32_t *)data;
1846 
1847             memcpy(dst, target_data, len);
1848             /* fix endianness of the first 32-bit word */
1849             if (len >= sizeof(uint32_t)) {
1850                 *dst = tswap32(*dst);
1851             }
1852         } else {
1853             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1854                           cmsg->cmsg_level, cmsg->cmsg_type);
1855             memcpy(data, target_data, len);
1856         }
1857 
1858         cmsg = CMSG_NXTHDR(msgh, cmsg);
1859         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1860                                          target_cmsg_start);
1861     }
1862     unlock_user(target_cmsg, target_cmsg_addr, 0);
1863  the_end:
1864     msgh->msg_controllen = space;
1865     return 0;
1866 }
1867 
1868 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1869                                            struct msghdr *msgh)
1870 {
1871     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1872     abi_long msg_controllen;
1873     abi_ulong target_cmsg_addr;
1874     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1875     socklen_t space = 0;
1876 
1877     msg_controllen = tswapal(target_msgh->msg_controllen);
1878     if (msg_controllen < sizeof (struct target_cmsghdr))
1879         goto the_end;
1880     target_cmsg_addr = tswapal(target_msgh->msg_control);
1881     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1882     target_cmsg_start = target_cmsg;
1883     if (!target_cmsg)
1884         return -TARGET_EFAULT;
1885 
1886     while (cmsg && target_cmsg) {
1887         void *data = CMSG_DATA(cmsg);
1888         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1889 
1890         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1891         int tgt_len, tgt_space;
1892 
1893         /* We never copy a half-header but may copy half-data;
1894          * this is Linux's behaviour in put_cmsg(). Note that
1895          * truncation here is a guest problem (which we report
1896          * to the guest via the CTRUNC bit), unlike truncation
1897          * in target_to_host_cmsg, which is a QEMU bug.
1898          */
1899         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1900             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1901             break;
1902         }
1903 
1904         if (cmsg->cmsg_level == SOL_SOCKET) {
1905             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1906         } else {
1907             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1908         }
1909         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1910 
1911         /* Payload types which need a different size of payload on
1912          * the target must adjust tgt_len here.
1913          */
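        /* (For example, SO_TIMESTAMP carries a struct timeval, whose target
         * layout may differ from the host's.) */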
1914         tgt_len = len;
1915         switch (cmsg->cmsg_level) {
1916         case SOL_SOCKET:
1917             switch (cmsg->cmsg_type) {
1918             case SO_TIMESTAMP:
1919                 tgt_len = sizeof(struct target_timeval);
1920                 break;
1921             default:
1922                 break;
1923             }
1924             break;
1925         default:
1926             break;
1927         }
1928 
1929         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1930             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1931             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1932         }
1933 
1934         /* We must now copy-and-convert len bytes of payload
1935          * into tgt_len bytes of destination space. Bear in mind
1936          * that in both source and destination we may be dealing
1937          * with a truncated value!
1938          */
1939         switch (cmsg->cmsg_level) {
1940         case SOL_SOCKET:
1941             switch (cmsg->cmsg_type) {
1942             case SCM_RIGHTS:
1943             {
1944                 int *fd = (int *)data;
1945                 int *target_fd = (int *)target_data;
1946                 int i, numfds = tgt_len / sizeof(int);
1947 
1948                 for (i = 0; i < numfds; i++) {
1949                     __put_user(fd[i], target_fd + i);
1950                 }
1951                 break;
1952             }
1953             case SO_TIMESTAMP:
1954             {
1955                 struct timeval *tv = (struct timeval *)data;
1956                 struct target_timeval *target_tv =
1957                     (struct target_timeval *)target_data;
1958 
1959                 if (len != sizeof(struct timeval) ||
1960                     tgt_len != sizeof(struct target_timeval)) {
1961                     goto unimplemented;
1962                 }
1963 
1964                 /* copy struct timeval to target */
1965                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1966                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1967                 break;
1968             }
1969             case SCM_CREDENTIALS:
1970             {
1971                 struct ucred *cred = (struct ucred *)data;
1972                 struct target_ucred *target_cred =
1973                     (struct target_ucred *)target_data;
1974 
1975                 __put_user(cred->pid, &target_cred->pid);
1976                 __put_user(cred->uid, &target_cred->uid);
1977                 __put_user(cred->gid, &target_cred->gid);
1978                 break;
1979             }
1980             default:
1981                 goto unimplemented;
1982             }
1983             break;
1984 
1985         case SOL_IP:
1986             switch (cmsg->cmsg_type) {
1987             case IP_TTL:
1988             {
1989                 uint32_t *v = (uint32_t *)data;
1990                 uint32_t *t_int = (uint32_t *)target_data;
1991 
1992                 if (len != sizeof(uint32_t) ||
1993                     tgt_len != sizeof(uint32_t)) {
1994                     goto unimplemented;
1995                 }
1996                 __put_user(*v, t_int);
1997                 break;
1998             }
1999             case IP_RECVERR:
2000             {
2001                 struct errhdr_t {
2002                    struct sock_extended_err ee;
2003                    struct sockaddr_in offender;
2004                 };
2005                 struct errhdr_t *errh = (struct errhdr_t *)data;
2006                 struct errhdr_t *target_errh =
2007                     (struct errhdr_t *)target_data;
2008 
2009                 if (len != sizeof(struct errhdr_t) ||
2010                     tgt_len != sizeof(struct errhdr_t)) {
2011                     goto unimplemented;
2012                 }
2013                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2014                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2015                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2016                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2017                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2018                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2019                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2020                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2021                     (void *) &errh->offender, sizeof(errh->offender));
2022                 break;
2023             }
2024             default:
2025                 goto unimplemented;
2026             }
2027             break;
2028 
2029         case SOL_IPV6:
2030             switch (cmsg->cmsg_type) {
2031             case IPV6_HOPLIMIT:
2032             {
2033                 uint32_t *v = (uint32_t *)data;
2034                 uint32_t *t_int = (uint32_t *)target_data;
2035 
2036                 if (len != sizeof(uint32_t) ||
2037                     tgt_len != sizeof(uint32_t)) {
2038                     goto unimplemented;
2039                 }
2040                 __put_user(*v, t_int);
2041                 break;
2042             }
2043             case IPV6_RECVERR:
2044             {
2045                 struct errhdr6_t {
2046                    struct sock_extended_err ee;
2047                    struct sockaddr_in6 offender;
2048                 };
2049                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2050                 struct errhdr6_t *target_errh =
2051                     (struct errhdr6_t *)target_data;
2052 
2053                 if (len != sizeof(struct errhdr6_t) ||
2054                     tgt_len != sizeof(struct errhdr6_t)) {
2055                     goto unimplemented;
2056                 }
2057                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2058                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2059                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2060                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2061                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2062                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2063                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2064                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2065                     (void *) &errh->offender, sizeof(errh->offender));
2066                 break;
2067             }
2068             default:
2069                 goto unimplemented;
2070             }
2071             break;
2072 
2073         default:
2074         unimplemented:
2075             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2076                           cmsg->cmsg_level, cmsg->cmsg_type);
2077             memcpy(target_data, data, MIN(len, tgt_len));
2078             if (tgt_len > len) {
2079                 memset(target_data + len, 0, tgt_len - len);
2080             }
2081         }
2082 
2083         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
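        /* Account for the target control-buffer space this cmsg consumed;
         * the running total is reported back via msg_controllen at the end. */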
2084         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2085         if (msg_controllen < tgt_space) {
2086             tgt_space = msg_controllen;
2087         }
2088         msg_controllen -= tgt_space;
2089         space += tgt_space;
2090         cmsg = CMSG_NXTHDR(msgh, cmsg);
2091         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2092                                          target_cmsg_start);
2093     }
2094     unlock_user(target_cmsg, target_cmsg_addr, space);
2095  the_end:
2096     target_msgh->msg_controllen = tswapal(space);
2097     return 0;
2098 }
2099 
2100 /* do_setsockopt() must return target values and target errnos. */
2101 static abi_long do_setsockopt(int sockfd, int level, int optname,
2102                               abi_ulong optval_addr, socklen_t optlen)
2103 {
2104     abi_long ret;
2105     int val;
2106     struct ip_mreqn *ip_mreq;
2107     struct ip_mreq_source *ip_mreq_source;
2108 
2109     switch(level) {
2110     case SOL_TCP:
2111     case SOL_UDP:
2112         /* TCP and UDP options all take an 'int' value.  */
2113         if (optlen < sizeof(uint32_t))
2114             return -TARGET_EINVAL;
2115 
2116         if (get_user_u32(val, optval_addr))
2117             return -TARGET_EFAULT;
2118         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2119         break;
2120     case SOL_IP:
2121         switch(optname) {
2122         case IP_TOS:
2123         case IP_TTL:
2124         case IP_HDRINCL:
2125         case IP_ROUTER_ALERT:
2126         case IP_RECVOPTS:
2127         case IP_RETOPTS:
2128         case IP_PKTINFO:
2129         case IP_MTU_DISCOVER:
2130         case IP_RECVERR:
2131         case IP_RECVTTL:
2132         case IP_RECVTOS:
2133 #ifdef IP_FREEBIND
2134         case IP_FREEBIND:
2135 #endif
2136         case IP_MULTICAST_TTL:
2137         case IP_MULTICAST_LOOP:
2138             val = 0;
2139             if (optlen >= sizeof(uint32_t)) {
2140                 if (get_user_u32(val, optval_addr))
2141                     return -TARGET_EFAULT;
2142             } else if (optlen >= 1) {
2143                 if (get_user_u8(val, optval_addr))
2144                     return -TARGET_EFAULT;
2145             }
2146             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2147             break;
2148         case IP_ADD_MEMBERSHIP:
2149         case IP_DROP_MEMBERSHIP:
2150             if (optlen < sizeof (struct target_ip_mreq) ||
2151                 optlen > sizeof (struct target_ip_mreqn))
2152                 return -TARGET_EINVAL;
2153 
2154             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2155             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2156             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2157             break;
2158 
2159         case IP_BLOCK_SOURCE:
2160         case IP_UNBLOCK_SOURCE:
2161         case IP_ADD_SOURCE_MEMBERSHIP:
2162         case IP_DROP_SOURCE_MEMBERSHIP:
2163             if (optlen != sizeof (struct target_ip_mreq_source))
2164                 return -TARGET_EINVAL;
2165 
2166             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2167             if (!ip_mreq_source) {
2168                 return -TARGET_EFAULT;
2169             }
2170             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2171             unlock_user(ip_mreq_source, optval_addr, 0);
2172             break;
2173 
2174         default:
2175             goto unimplemented;
2176         }
2177         break;
2178     case SOL_IPV6:
2179         switch (optname) {
2180         case IPV6_MTU_DISCOVER:
2181         case IPV6_MTU:
2182         case IPV6_V6ONLY:
2183         case IPV6_RECVPKTINFO:
2184         case IPV6_UNICAST_HOPS:
2185         case IPV6_MULTICAST_HOPS:
2186         case IPV6_MULTICAST_LOOP:
2187         case IPV6_RECVERR:
2188         case IPV6_RECVHOPLIMIT:
2189         case IPV6_2292HOPLIMIT:
2190         case IPV6_CHECKSUM:
2191         case IPV6_ADDRFORM:
2192         case IPV6_2292PKTINFO:
2193         case IPV6_RECVTCLASS:
2194         case IPV6_RECVRTHDR:
2195         case IPV6_2292RTHDR:
2196         case IPV6_RECVHOPOPTS:
2197         case IPV6_2292HOPOPTS:
2198         case IPV6_RECVDSTOPTS:
2199         case IPV6_2292DSTOPTS:
2200         case IPV6_TCLASS:
2201         case IPV6_ADDR_PREFERENCES:
2202 #ifdef IPV6_RECVPATHMTU
2203         case IPV6_RECVPATHMTU:
2204 #endif
2205 #ifdef IPV6_TRANSPARENT
2206         case IPV6_TRANSPARENT:
2207 #endif
2208 #ifdef IPV6_FREEBIND
2209         case IPV6_FREEBIND:
2210 #endif
2211 #ifdef IPV6_RECVORIGDSTADDR
2212         case IPV6_RECVORIGDSTADDR:
2213 #endif
2214             val = 0;
2215             if (optlen < sizeof(uint32_t)) {
2216                 return -TARGET_EINVAL;
2217             }
2218             if (get_user_u32(val, optval_addr)) {
2219                 return -TARGET_EFAULT;
2220             }
2221             ret = get_errno(setsockopt(sockfd, level, optname,
2222                                        &val, sizeof(val)));
2223             break;
2224         case IPV6_PKTINFO:
2225         {
2226             struct in6_pktinfo pki;
2227 
2228             if (optlen < sizeof(pki)) {
2229                 return -TARGET_EINVAL;
2230             }
2231 
2232             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2233                 return -TARGET_EFAULT;
2234             }
2235 
2236             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2237 
2238             ret = get_errno(setsockopt(sockfd, level, optname,
2239                                        &pki, sizeof(pki)));
2240             break;
2241         }
2242         case IPV6_ADD_MEMBERSHIP:
2243         case IPV6_DROP_MEMBERSHIP:
2244         {
2245             struct ipv6_mreq ipv6mreq;
2246 
2247             if (optlen < sizeof(ipv6mreq)) {
2248                 return -TARGET_EINVAL;
2249             }
2250 
2251             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2252                 return -TARGET_EFAULT;
2253             }
2254 
2255             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2256 
2257             ret = get_errno(setsockopt(sockfd, level, optname,
2258                                        &ipv6mreq, sizeof(ipv6mreq)));
2259             break;
2260         }
2261         default:
2262             goto unimplemented;
2263         }
2264         break;
2265     case SOL_ICMPV6:
2266         switch (optname) {
2267         case ICMPV6_FILTER:
2268         {
2269             struct icmp6_filter icmp6f;
2270 
2271             if (optlen > sizeof(icmp6f)) {
2272                 optlen = sizeof(icmp6f);
2273             }
2274 
2275             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2276                 return -TARGET_EFAULT;
2277             }
2278 
2279             for (val = 0; val < 8; val++) {
2280                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2281             }
2282 
2283             ret = get_errno(setsockopt(sockfd, level, optname,
2284                                        &icmp6f, optlen));
2285             break;
2286         }
2287         default:
2288             goto unimplemented;
2289         }
2290         break;
2291     case SOL_RAW:
2292         switch (optname) {
2293         case ICMP_FILTER:
2294         case IPV6_CHECKSUM:
2295             /* these take a u32 value */
2296             if (optlen < sizeof(uint32_t)) {
2297                 return -TARGET_EINVAL;
2298             }
2299 
2300             if (get_user_u32(val, optval_addr)) {
2301                 return -TARGET_EFAULT;
2302             }
2303             ret = get_errno(setsockopt(sockfd, level, optname,
2304                                        &val, sizeof(val)));
2305             break;
2306 
2307         default:
2308             goto unimplemented;
2309         }
2310         break;
2311 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2312     case SOL_ALG:
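        /* Linux crypto API sockets: ALG_SET_KEY takes an opaque byte string
         * (copied verbatim, no byte swapping), while ALG_SET_AEAD_AUTHSIZE
         * passes its value in optlen with a NULL optval. */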
2313         switch (optname) {
2314         case ALG_SET_KEY:
2315         {
2316             char *alg_key = g_malloc(optlen);
2317 
2318             if (!alg_key) {
2319                 return -TARGET_ENOMEM;
2320             }
2321             if (copy_from_user(alg_key, optval_addr, optlen)) {
2322                 g_free(alg_key);
2323                 return -TARGET_EFAULT;
2324             }
2325             ret = get_errno(setsockopt(sockfd, level, optname,
2326                                        alg_key, optlen));
2327             g_free(alg_key);
2328             break;
2329         }
2330         case ALG_SET_AEAD_AUTHSIZE:
2331         {
2332             ret = get_errno(setsockopt(sockfd, level, optname,
2333                                        NULL, optlen));
2334             break;
2335         }
2336         default:
2337             goto unimplemented;
2338         }
2339         break;
2340 #endif
2341     case TARGET_SOL_SOCKET:
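        /* The guest's SOL_SOCKET constant may differ from the host's, so we
         * match on TARGET_SOL_SOCKET and translate both the level and the
         * optname before calling the host setsockopt() below. */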
2342         switch (optname) {
2343         case TARGET_SO_RCVTIMEO:
2344         {
2345                 struct timeval tv;
2346 
2347                 optname = SO_RCVTIMEO;
2348 
2349 set_timeout:
2350                 if (optlen != sizeof(struct target_timeval)) {
2351                     return -TARGET_EINVAL;
2352                 }
2353 
2354                 if (copy_from_user_timeval(&tv, optval_addr)) {
2355                     return -TARGET_EFAULT;
2356                 }
2357 
2358                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2359                                 &tv, sizeof(tv)));
2360                 return ret;
2361         }
2362         case TARGET_SO_SNDTIMEO:
2363                 optname = SO_SNDTIMEO;
2364                 goto set_timeout;
2365         case TARGET_SO_ATTACH_FILTER:
2366         {
2367                 struct target_sock_fprog *tfprog;
2368                 struct target_sock_filter *tfilter;
2369                 struct sock_fprog fprog;
2370                 struct sock_filter *filter;
2371                 int i;
2372 
2373                 if (optlen != sizeof(*tfprog)) {
2374                     return -TARGET_EINVAL;
2375                 }
2376                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2377                     return -TARGET_EFAULT;
2378                 }
2379                 if (!lock_user_struct(VERIFY_READ, tfilter,
2380                                       tswapal(tfprog->filter), 0)) {
2381                     unlock_user_struct(tfprog, optval_addr, 1);
2382                     return -TARGET_EFAULT;
2383                 }
2384 
2385                 fprog.len = tswap16(tfprog->len);
2386                 filter = g_try_new(struct sock_filter, fprog.len);
2387                 if (filter == NULL) {
2388                     unlock_user_struct(tfilter, tfprog->filter, 1);
2389                     unlock_user_struct(tfprog, optval_addr, 1);
2390                     return -TARGET_ENOMEM;
2391                 }
2392                 for (i = 0; i < fprog.len; i++) {
2393                     filter[i].code = tswap16(tfilter[i].code);
2394                     filter[i].jt = tfilter[i].jt;
2395                     filter[i].jf = tfilter[i].jf;
2396                     filter[i].k = tswap32(tfilter[i].k);
2397                 }
2398                 fprog.filter = filter;
2399 
2400                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2401                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2402                 g_free(filter);
2403 
2404                 unlock_user_struct(tfilter, tfprog->filter, 1);
2405                 unlock_user_struct(tfprog, optval_addr, 1);
2406                 return ret;
2407         }
2408         case TARGET_SO_BINDTODEVICE:
2409         {
2410                 char *dev_ifname, *addr_ifname;
2411 
2412                 if (optlen > IFNAMSIZ - 1) {
2413                     optlen = IFNAMSIZ - 1;
2414                 }
2415                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2416                 if (!dev_ifname) {
2417                     return -TARGET_EFAULT;
2418                 }
2419                 optname = SO_BINDTODEVICE;
2420                 addr_ifname = alloca(IFNAMSIZ);
2421                 memcpy(addr_ifname, dev_ifname, optlen);
2422                 addr_ifname[optlen] = 0;
2423                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2424                                            addr_ifname, optlen));
2425                 unlock_user(dev_ifname, optval_addr, 0);
2426                 return ret;
2427         }
2428         case TARGET_SO_LINGER:
2429         {
2430                 struct linger lg;
2431                 struct target_linger *tlg;
2432 
2433                 if (optlen != sizeof(struct target_linger)) {
2434                     return -TARGET_EINVAL;
2435                 }
2436                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2437                     return -TARGET_EFAULT;
2438                 }
2439                 __get_user(lg.l_onoff, &tlg->l_onoff);
2440                 __get_user(lg.l_linger, &tlg->l_linger);
2441                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2442                                 &lg, sizeof(lg)));
2443                 unlock_user_struct(tlg, optval_addr, 0);
2444                 return ret;
2445         }
2446         /* Options with 'int' argument.  */
2447         case TARGET_SO_DEBUG:
2448                 optname = SO_DEBUG;
2449                 break;
2450         case TARGET_SO_REUSEADDR:
2451                 optname = SO_REUSEADDR;
2452                 break;
2453 #ifdef SO_REUSEPORT
2454         case TARGET_SO_REUSEPORT:
2455                 optname = SO_REUSEPORT;
2456                 break;
2457 #endif
2458         case TARGET_SO_TYPE:
2459                 optname = SO_TYPE;
2460                 break;
2461         case TARGET_SO_ERROR:
2462                 optname = SO_ERROR;
2463                 break;
2464         case TARGET_SO_DONTROUTE:
2465                 optname = SO_DONTROUTE;
2466                 break;
2467         case TARGET_SO_BROADCAST:
2468                 optname = SO_BROADCAST;
2469                 break;
2470         case TARGET_SO_SNDBUF:
2471                 optname = SO_SNDBUF;
2472                 break;
2473         case TARGET_SO_SNDBUFFORCE:
2474                 optname = SO_SNDBUFFORCE;
2475                 break;
2476         case TARGET_SO_RCVBUF:
2477                 optname = SO_RCVBUF;
2478                 break;
2479         case TARGET_SO_RCVBUFFORCE:
2480                 optname = SO_RCVBUFFORCE;
2481                 break;
2482         case TARGET_SO_KEEPALIVE:
2483                 optname = SO_KEEPALIVE;
2484                 break;
2485         case TARGET_SO_OOBINLINE:
2486                 optname = SO_OOBINLINE;
2487                 break;
2488         case TARGET_SO_NO_CHECK:
2489                 optname = SO_NO_CHECK;
2490                 break;
2491         case TARGET_SO_PRIORITY:
2492                 optname = SO_PRIORITY;
2493                 break;
2494 #ifdef SO_BSDCOMPAT
2495         case TARGET_SO_BSDCOMPAT:
2496                 optname = SO_BSDCOMPAT;
2497                 break;
2498 #endif
2499         case TARGET_SO_PASSCRED:
2500                 optname = SO_PASSCRED;
2501                 break;
2502         case TARGET_SO_PASSSEC:
2503                 optname = SO_PASSSEC;
2504                 break;
2505         case TARGET_SO_TIMESTAMP:
2506                 optname = SO_TIMESTAMP;
2507                 break;
2508         case TARGET_SO_RCVLOWAT:
2509                 optname = SO_RCVLOWAT;
2510                 break;
2511         default:
2512             goto unimplemented;
2513         }
2514         if (optlen < sizeof(uint32_t))
2515             return -TARGET_EINVAL;
2516 
2517         if (get_user_u32(val, optval_addr))
2518             return -TARGET_EFAULT;
2519         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2520         break;
2521 #ifdef SOL_NETLINK
2522     case SOL_NETLINK:
2523         switch (optname) {
2524         case NETLINK_PKTINFO:
2525         case NETLINK_ADD_MEMBERSHIP:
2526         case NETLINK_DROP_MEMBERSHIP:
2527         case NETLINK_BROADCAST_ERROR:
2528         case NETLINK_NO_ENOBUFS:
2529 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2530         case NETLINK_LISTEN_ALL_NSID:
2531         case NETLINK_CAP_ACK:
2532 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2533 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2534         case NETLINK_EXT_ACK:
2535 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2536 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2537         case NETLINK_GET_STRICT_CHK:
2538 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2539             break;
2540         default:
2541             goto unimplemented;
2542         }
2543         val = 0;
2544         if (optlen < sizeof(uint32_t)) {
2545             return -TARGET_EINVAL;
2546         }
2547         if (get_user_u32(val, optval_addr)) {
2548             return -TARGET_EFAULT;
2549         }
2550         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2551                                    sizeof(val)));
2552         break;
2553 #endif /* SOL_NETLINK */
2554     default:
2555     unimplemented:
2556         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2557                       level, optname);
2558         ret = -TARGET_ENOPROTOOPT;
2559     }
2560     return ret;
2561 }
2562 
2563 /* do_getsockopt() must return target values and target errnos. */
2564 static abi_long do_getsockopt(int sockfd, int level, int optname,
2565                               abi_ulong optval_addr, abi_ulong optlen)
2566 {
2567     abi_long ret;
2568     int len, val;
2569     socklen_t lv;
2570 
2571     switch(level) {
2572     case TARGET_SOL_SOCKET:
2573         level = SOL_SOCKET;
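        /* As in do_setsockopt(): translate the guest socket level (and each
         * optname below) to the host values. */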
2574         switch (optname) {
2575         /* These don't just return a single integer */
2576         case TARGET_SO_PEERNAME:
2577             goto unimplemented;
2578         case TARGET_SO_RCVTIMEO: {
2579             struct timeval tv;
2580             socklen_t tvlen;
2581 
2582             optname = SO_RCVTIMEO;
2583 
2584 get_timeout:
2585             if (get_user_u32(len, optlen)) {
2586                 return -TARGET_EFAULT;
2587             }
2588             if (len < 0) {
2589                 return -TARGET_EINVAL;
2590             }
2591 
2592             tvlen = sizeof(tv);
2593             ret = get_errno(getsockopt(sockfd, level, optname,
2594                                        &tv, &tvlen));
2595             if (ret < 0) {
2596                 return ret;
2597             }
2598             if (len > sizeof(struct target_timeval)) {
2599                 len = sizeof(struct target_timeval);
2600             }
2601             if (copy_to_user_timeval(optval_addr, &tv)) {
2602                 return -TARGET_EFAULT;
2603             }
2604             if (put_user_u32(len, optlen)) {
2605                 return -TARGET_EFAULT;
2606             }
2607             break;
2608         }
2609         case TARGET_SO_SNDTIMEO:
2610             optname = SO_SNDTIMEO;
2611             goto get_timeout;
2612         case TARGET_SO_PEERCRED: {
2613             struct ucred cr;
2614             socklen_t crlen;
2615             struct target_ucred *tcr;
2616 
2617             if (get_user_u32(len, optlen)) {
2618                 return -TARGET_EFAULT;
2619             }
2620             if (len < 0) {
2621                 return -TARGET_EINVAL;
2622             }
2623 
2624             crlen = sizeof(cr);
2625             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2626                                        &cr, &crlen));
2627             if (ret < 0) {
2628                 return ret;
2629             }
2630             if (len > crlen) {
2631                 len = crlen;
2632             }
2633             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2634                 return -TARGET_EFAULT;
2635             }
2636             __put_user(cr.pid, &tcr->pid);
2637             __put_user(cr.uid, &tcr->uid);
2638             __put_user(cr.gid, &tcr->gid);
2639             unlock_user_struct(tcr, optval_addr, 1);
2640             if (put_user_u32(len, optlen)) {
2641                 return -TARGET_EFAULT;
2642             }
2643             break;
2644         }
2645         case TARGET_SO_PEERSEC: {
2646             char *name;
2647 
2648             if (get_user_u32(len, optlen)) {
2649                 return -TARGET_EFAULT;
2650             }
2651             if (len < 0) {
2652                 return -TARGET_EINVAL;
2653             }
2654             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2655             if (!name) {
2656                 return -TARGET_EFAULT;
2657             }
2658             lv = len;
2659             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2660                                        name, &lv));
2661             if (put_user_u32(lv, optlen)) {
2662                 ret = -TARGET_EFAULT;
2663             }
2664             unlock_user(name, optval_addr, lv);
2665             break;
2666         }
2667         case TARGET_SO_LINGER:
2668         {
2669             struct linger lg;
2670             socklen_t lglen;
2671             struct target_linger *tlg;
2672 
2673             if (get_user_u32(len, optlen)) {
2674                 return -TARGET_EFAULT;
2675             }
2676             if (len < 0) {
2677                 return -TARGET_EINVAL;
2678             }
2679 
2680             lglen = sizeof(lg);
2681             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2682                                        &lg, &lglen));
2683             if (ret < 0) {
2684                 return ret;
2685             }
2686             if (len > lglen) {
2687                 len = lglen;
2688             }
2689             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2690                 return -TARGET_EFAULT;
2691             }
2692             __put_user(lg.l_onoff, &tlg->l_onoff);
2693             __put_user(lg.l_linger, &tlg->l_linger);
2694             unlock_user_struct(tlg, optval_addr, 1);
2695             if (put_user_u32(len, optlen)) {
2696                 return -TARGET_EFAULT;
2697             }
2698             break;
2699         }
2700         /* Options with 'int' argument.  */
2701         case TARGET_SO_DEBUG:
2702             optname = SO_DEBUG;
2703             goto int_case;
2704         case TARGET_SO_REUSEADDR:
2705             optname = SO_REUSEADDR;
2706             goto int_case;
2707 #ifdef SO_REUSEPORT
2708         case TARGET_SO_REUSEPORT:
2709             optname = SO_REUSEPORT;
2710             goto int_case;
2711 #endif
2712         case TARGET_SO_TYPE:
2713             optname = SO_TYPE;
2714             goto int_case;
2715         case TARGET_SO_ERROR:
2716             optname = SO_ERROR;
2717             goto int_case;
2718         case TARGET_SO_DONTROUTE:
2719             optname = SO_DONTROUTE;
2720             goto int_case;
2721         case TARGET_SO_BROADCAST:
2722             optname = SO_BROADCAST;
2723             goto int_case;
2724         case TARGET_SO_SNDBUF:
2725             optname = SO_SNDBUF;
2726             goto int_case;
2727         case TARGET_SO_RCVBUF:
2728             optname = SO_RCVBUF;
2729             goto int_case;
2730         case TARGET_SO_KEEPALIVE:
2731             optname = SO_KEEPALIVE;
2732             goto int_case;
2733         case TARGET_SO_OOBINLINE:
2734             optname = SO_OOBINLINE;
2735             goto int_case;
2736         case TARGET_SO_NO_CHECK:
2737             optname = SO_NO_CHECK;
2738             goto int_case;
2739         case TARGET_SO_PRIORITY:
2740             optname = SO_PRIORITY;
2741             goto int_case;
2742 #ifdef SO_BSDCOMPAT
2743         case TARGET_SO_BSDCOMPAT:
2744             optname = SO_BSDCOMPAT;
2745             goto int_case;
2746 #endif
2747         case TARGET_SO_PASSCRED:
2748             optname = SO_PASSCRED;
2749             goto int_case;
2750         case TARGET_SO_TIMESTAMP:
2751             optname = SO_TIMESTAMP;
2752             goto int_case;
2753         case TARGET_SO_RCVLOWAT:
2754             optname = SO_RCVLOWAT;
2755             goto int_case;
2756         case TARGET_SO_ACCEPTCONN:
2757             optname = SO_ACCEPTCONN;
2758             goto int_case;
2759         case TARGET_SO_PROTOCOL:
2760             optname = SO_PROTOCOL;
2761             goto int_case;
2762         case TARGET_SO_DOMAIN:
2763             optname = SO_DOMAIN;
2764             goto int_case;
2765         default:
2766             goto int_case;
2767         }
2768         break;
2769     case SOL_TCP:
2770     case SOL_UDP:
2771         /* TCP and UDP options all take an 'int' value.  */
2772     int_case:
2773         if (get_user_u32(len, optlen))
2774             return -TARGET_EFAULT;
2775         if (len < 0)
2776             return -TARGET_EINVAL;
2777         lv = sizeof(lv);
2778         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2779         if (ret < 0)
2780             return ret;
2781         switch (optname) {
2782         case SO_TYPE:
2783             val = host_to_target_sock_type(val);
2784             break;
2785         case SO_ERROR:
2786             val = host_to_target_errno(val);
2787             break;
2788         }
2789         if (len > lv)
2790             len = lv;
2791         if (len == 4) {
2792             if (put_user_u32(val, optval_addr))
2793                 return -TARGET_EFAULT;
2794         } else {
2795             if (put_user_u8(val, optval_addr))
2796                 return -TARGET_EFAULT;
2797         }
2798         if (put_user_u32(len, optlen))
2799             return -TARGET_EFAULT;
2800         break;
2801     case SOL_IP:
2802         switch(optname) {
2803         case IP_TOS:
2804         case IP_TTL:
2805         case IP_HDRINCL:
2806         case IP_ROUTER_ALERT:
2807         case IP_RECVOPTS:
2808         case IP_RETOPTS:
2809         case IP_PKTINFO:
2810         case IP_MTU_DISCOVER:
2811         case IP_RECVERR:
2812         case IP_RECVTOS:
2813 #ifdef IP_FREEBIND
2814         case IP_FREEBIND:
2815 #endif
2816         case IP_MULTICAST_TTL:
2817         case IP_MULTICAST_LOOP:
2818             if (get_user_u32(len, optlen))
2819                 return -TARGET_EFAULT;
2820             if (len < 0)
2821                 return -TARGET_EINVAL;
2822             lv = sizeof(lv);
2823             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2824             if (ret < 0)
2825                 return ret;
2826             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2827                 len = 1;
2828                 if (put_user_u32(len, optlen)
2829                     || put_user_u8(val, optval_addr))
2830                     return -TARGET_EFAULT;
2831             } else {
2832                 if (len > sizeof(int))
2833                     len = sizeof(int);
2834                 if (put_user_u32(len, optlen)
2835                     || put_user_u32(val, optval_addr))
2836                     return -TARGET_EFAULT;
2837             }
2838             break;
2839         default:
2840             ret = -TARGET_ENOPROTOOPT;
2841             break;
2842         }
2843         break;
2844     case SOL_IPV6:
2845         switch (optname) {
2846         case IPV6_MTU_DISCOVER:
2847         case IPV6_MTU:
2848         case IPV6_V6ONLY:
2849         case IPV6_RECVPKTINFO:
2850         case IPV6_UNICAST_HOPS:
2851         case IPV6_MULTICAST_HOPS:
2852         case IPV6_MULTICAST_LOOP:
2853         case IPV6_RECVERR:
2854         case IPV6_RECVHOPLIMIT:
2855         case IPV6_2292HOPLIMIT:
2856         case IPV6_CHECKSUM:
2857         case IPV6_ADDRFORM:
2858         case IPV6_2292PKTINFO:
2859         case IPV6_RECVTCLASS:
2860         case IPV6_RECVRTHDR:
2861         case IPV6_2292RTHDR:
2862         case IPV6_RECVHOPOPTS:
2863         case IPV6_2292HOPOPTS:
2864         case IPV6_RECVDSTOPTS:
2865         case IPV6_2292DSTOPTS:
2866         case IPV6_TCLASS:
2867         case IPV6_ADDR_PREFERENCES:
2868 #ifdef IPV6_RECVPATHMTU
2869         case IPV6_RECVPATHMTU:
2870 #endif
2871 #ifdef IPV6_TRANSPARENT
2872         case IPV6_TRANSPARENT:
2873 #endif
2874 #ifdef IPV6_FREEBIND
2875         case IPV6_FREEBIND:
2876 #endif
2877 #ifdef IPV6_RECVORIGDSTADDR
2878         case IPV6_RECVORIGDSTADDR:
2879 #endif
2880             if (get_user_u32(len, optlen))
2881                 return -TARGET_EFAULT;
2882             if (len < 0)
2883                 return -TARGET_EINVAL;
2884             lv = sizeof(lv);
2885             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2886             if (ret < 0)
2887                 return ret;
2888             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2889                 len = 1;
2890                 if (put_user_u32(len, optlen)
2891                     || put_user_u8(val, optval_addr))
2892                     return -TARGET_EFAULT;
2893             } else {
2894                 if (len > sizeof(int))
2895                     len = sizeof(int);
2896                 if (put_user_u32(len, optlen)
2897                     || put_user_u32(val, optval_addr))
2898                     return -TARGET_EFAULT;
2899             }
2900             break;
2901         default:
2902             ret = -TARGET_ENOPROTOOPT;
2903             break;
2904         }
2905         break;
2906 #ifdef SOL_NETLINK
2907     case SOL_NETLINK:
2908         switch (optname) {
2909         case NETLINK_PKTINFO:
2910         case NETLINK_BROADCAST_ERROR:
2911         case NETLINK_NO_ENOBUFS:
2912 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2913         case NETLINK_LISTEN_ALL_NSID:
2914         case NETLINK_CAP_ACK:
2915 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2916 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2917         case NETLINK_EXT_ACK:
2918 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2919 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2920         case NETLINK_GET_STRICT_CHK:
2921 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2922             if (get_user_u32(len, optlen)) {
2923                 return -TARGET_EFAULT;
2924             }
2925             if (len != sizeof(val)) {
2926                 return -TARGET_EINVAL;
2927             }
2928             lv = len;
2929             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2930             if (ret < 0) {
2931                 return ret;
2932             }
2933             if (put_user_u32(lv, optlen)
2934                 || put_user_u32(val, optval_addr)) {
2935                 return -TARGET_EFAULT;
2936             }
2937             break;
2938 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2939         case NETLINK_LIST_MEMBERSHIPS:
2940         {
2941             uint32_t *results;
2942             int i;
2943             if (get_user_u32(len, optlen)) {
2944                 return -TARGET_EFAULT;
2945             }
2946             if (len < 0) {
2947                 return -TARGET_EINVAL;
2948             }
2949             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2950             if (!results && len > 0) {
2951                 return -TARGET_EFAULT;
2952             }
2953             lv = len;
2954             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2955             if (ret < 0) {
2956                 unlock_user(results, optval_addr, 0);
2957                 return ret;
2958             }
2959             /* swap host endianess to target endianess. */
2960             /* Convert from host endianness to target endianness. */
2961                 results[i] = tswap32(results[i]);
2962             }
2963             if (put_user_u32(lv, optlen)) {
2964                 return -TARGET_EFAULT;
2965             }
2966             unlock_user(results, optval_addr, 0);
2967             break;
2968         }
2969 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2970         default:
2971             goto unimplemented;
2972         }
2973         break;
2974 #endif /* SOL_NETLINK */
2975     default:
2976     unimplemented:
2977         qemu_log_mask(LOG_UNIMP,
2978                       "getsockopt level=%d optname=%d not yet supported\n",
2979                       level, optname);
2980         ret = -TARGET_EOPNOTSUPP;
2981         break;
2982     }
2983     return ret;
2984 }
2985 
2986 /* Convert target low/high pair representing file offset into the host
2987  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2988  * as the kernel doesn't handle them either.
2989  */
2990 static void target_to_host_low_high(abi_ulong tlow,
2991                                     abi_ulong thigh,
2992                                     unsigned long *hlow,
2993                                     unsigned long *hhigh)
2994 {
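    /* The shifts below are split in two so we never shift by the full width
     * of the operand (undefined behaviour) when TARGET_LONG_BITS or
     * HOST_LONG_BITS equals 64. */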
2995     uint64_t off = tlow |
2996         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2997         TARGET_LONG_BITS / 2;
2998 
2999     *hlow = off;
3000     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3001 }
3002 
3003 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3004                                 abi_ulong count, int copy)
3005 {
3006     struct target_iovec *target_vec;
3007     struct iovec *vec;
3008     abi_ulong total_len, max_len;
3009     int i;
3010     int err = 0;
3011     bool bad_address = false;
3012 
3013     if (count == 0) {
3014         errno = 0;
3015         return NULL;
3016     }
3017     if (count > IOV_MAX) {
3018         errno = EINVAL;
3019         return NULL;
3020     }
3021 
3022     vec = g_try_new0(struct iovec, count);
3023     if (vec == NULL) {
3024         errno = ENOMEM;
3025         return NULL;
3026     }
3027 
3028     target_vec = lock_user(VERIFY_READ, target_addr,
3029                            count * sizeof(struct target_iovec), 1);
3030     if (target_vec == NULL) {
3031         err = EFAULT;
3032         goto fail2;
3033     }
3034 
3035     /* ??? If host page size > target page size, this will result in a
3036        value larger than what we can actually support.  */
3037     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3038     total_len = 0;
3039 
3040     for (i = 0; i < count; i++) {
3041         abi_ulong base = tswapal(target_vec[i].iov_base);
3042         abi_long len = tswapal(target_vec[i].iov_len);
3043 
3044         if (len < 0) {
3045             err = EINVAL;
3046             goto fail;
3047         } else if (len == 0) {
3048             /* A zero-length entry is ignored.  */
3049             vec[i].iov_base = 0;
3050         } else {
3051             vec[i].iov_base = lock_user(type, base, len, copy);
3052             /* If the first buffer pointer is bad, this is a fault.  But
3053              * subsequent bad buffers will result in a partial write; this
3054              * is realized by filling the vector with null pointers and
3055              * zero lengths. */
3056             if (!vec[i].iov_base) {
3057                 if (i == 0) {
3058                     err = EFAULT;
3059                     goto fail;
3060                 } else {
3061                     bad_address = true;
3062                 }
3063             }
3064             if (bad_address) {
3065                 len = 0;
3066             }
3067             if (len > max_len - total_len) {
3068                 len = max_len - total_len;
3069             }
3070         }
3071         vec[i].iov_len = len;
3072         total_len += len;
3073     }
3074 
3075     unlock_user(target_vec, target_addr, 0);
3076     return vec;
3077 
3078  fail:
3079     while (--i >= 0) {
3080         if (tswapal(target_vec[i].iov_len) > 0) {
3081             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3082         }
3083     }
3084     unlock_user(target_vec, target_addr, 0);
3085  fail2:
3086     g_free(vec);
3087     errno = err;
3088     return NULL;
3089 }
3090 
3091 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3092                          abi_ulong count, int copy)
3093 {
3094     struct target_iovec *target_vec;
3095     int i;
3096 
3097     target_vec = lock_user(VERIFY_READ, target_addr,
3098                            count * sizeof(struct target_iovec), 1);
3099     if (target_vec) {
3100         for (i = 0; i < count; i++) {
3101             abi_ulong base = tswapal(target_vec[i].iov_base);
3102             abi_long len = tswapal(target_vec[i].iov_len);
3103             if (len < 0) {
3104                 break;
3105             }
3106             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3107         }
3108         unlock_user(target_vec, target_addr, 0);
3109     }
3110 
3111     g_free(vec);
3112 }
3113 
3114 static inline int target_to_host_sock_type(int *type)
3115 {
3116     int host_type = 0;
3117     int target_type = *type;
3118 
3119     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3120     case TARGET_SOCK_DGRAM:
3121         host_type = SOCK_DGRAM;
3122         break;
3123     case TARGET_SOCK_STREAM:
3124         host_type = SOCK_STREAM;
3125         break;
3126     default:
3127         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3128         break;
3129     }
3130     if (target_type & TARGET_SOCK_CLOEXEC) {
3131 #if defined(SOCK_CLOEXEC)
3132         host_type |= SOCK_CLOEXEC;
3133 #else
3134         return -TARGET_EINVAL;
3135 #endif
3136     }
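    /*
     * If the host lacks SOCK_NONBLOCK but has O_NONBLOCK, leave the flag
     * unset here; sock_flags_fixup() below emulates it with fcntl() once
     * the socket has been created.
     */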
3137     if (target_type & TARGET_SOCK_NONBLOCK) {
3138 #if defined(SOCK_NONBLOCK)
3139         host_type |= SOCK_NONBLOCK;
3140 #elif !defined(O_NONBLOCK)
3141         return -TARGET_EINVAL;
3142 #endif
3143     }
3144     *type = host_type;
3145     return 0;
3146 }
3147 
3148 /* Try to emulate socket type flags after socket creation.  */
3149 static int sock_flags_fixup(int fd, int target_type)
3150 {
3151 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3152     if (target_type & TARGET_SOCK_NONBLOCK) {
3153         int flags = fcntl(fd, F_GETFL);
3154         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3155             close(fd);
3156             return -TARGET_EINVAL;
3157         }
3158     }
3159 #endif
3160     return fd;
3161 }
3162 
3163 /* do_socket() Must return target values and target errnos. */
3164 static abi_long do_socket(int domain, int type, int protocol)
3165 {
3166     int target_type = type;
3167     int ret;
3168 
3169     ret = target_to_host_sock_type(&type);
3170     if (ret) {
3171         return ret;
3172     }
3173 
3174     if (domain == PF_NETLINK && !(
3175 #ifdef CONFIG_RTNETLINK
3176          protocol == NETLINK_ROUTE ||
3177 #endif
3178          protocol == NETLINK_KOBJECT_UEVENT ||
3179          protocol == NETLINK_AUDIT)) {
3180         return -TARGET_EPROTONOSUPPORT;
3181     }
3182 
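    /*
     * For packet sockets the guest has already converted the protocol to
     * network byte order with htons(); when guest and host endianness
     * differ, tswap16() repeats that conversion for the host.
     */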
3183     if (domain == AF_PACKET ||
3184         (domain == AF_INET && type == SOCK_PACKET)) {
3185         protocol = tswap16(protocol);
3186     }
3187 
3188     ret = get_errno(socket(domain, type, protocol));
3189     if (ret >= 0) {
3190         ret = sock_flags_fixup(ret, target_type);
3191         if (type == SOCK_PACKET) {
3192             /* Handle the obsolete SOCK_PACKET case:
3193              * such sockets are bound by device name.
3194              */
3195             fd_trans_register(ret, &target_packet_trans);
3196         } else if (domain == PF_NETLINK) {
3197             switch (protocol) {
3198 #ifdef CONFIG_RTNETLINK
3199             case NETLINK_ROUTE:
3200                 fd_trans_register(ret, &target_netlink_route_trans);
3201                 break;
3202 #endif
3203             case NETLINK_KOBJECT_UEVENT:
3204                 /* nothing to do: messages are strings */
3205                 break;
3206             case NETLINK_AUDIT:
3207                 fd_trans_register(ret, &target_netlink_audit_trans);
3208                 break;
3209             default:
3210                 g_assert_not_reached();
3211             }
3212         }
3213     }
3214     return ret;
3215 }
3216 
3217 /* do_bind() Must return target values and target errnos. */
3218 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3219                         socklen_t addrlen)
3220 {
3221     void *addr;
3222     abi_long ret;
3223 
3224     if ((int)addrlen < 0) {
3225         return -TARGET_EINVAL;
3226     }
3227 
3228     addr = alloca(addrlen+1);
3229 
3230     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3231     if (ret)
3232         return ret;
3233 
3234     return get_errno(bind(sockfd, addr, addrlen));
3235 }
3236 
3237 /* do_connect() Must return target values and target errnos. */
3238 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3239                            socklen_t addrlen)
3240 {
3241     void *addr;
3242     abi_long ret;
3243 
3244     if ((int)addrlen < 0) {
3245         return -TARGET_EINVAL;
3246     }
3247 
3248     addr = alloca(addrlen+1);
3249 
3250     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3251     if (ret)
3252         return ret;
3253 
3254     return get_errno(safe_connect(sockfd, addr, addrlen));
3255 }
3256 
3257 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3258 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3259                                       int flags, int send)
3260 {
3261     abi_long ret, len;
3262     struct msghdr msg;
3263     abi_ulong count;
3264     struct iovec *vec;
3265     abi_ulong target_vec;
3266 
3267     if (msgp->msg_name) {
3268         msg.msg_namelen = tswap32(msgp->msg_namelen);
3269         msg.msg_name = alloca(msg.msg_namelen+1);
3270         ret = target_to_host_sockaddr(fd, msg.msg_name,
3271                                       tswapal(msgp->msg_name),
3272                                       msg.msg_namelen);
3273         if (ret == -TARGET_EFAULT) {
3274             /* For connected sockets msg_name and msg_namelen must
3275              * be ignored, so returning EFAULT immediately is wrong.
3276              * Instead, pass a bad msg_name to the host kernel, and
3277              * let it decide whether to return EFAULT or not.
3278              */
3279             msg.msg_name = (void *)-1;
3280         } else if (ret) {
3281             goto out2;
3282         }
3283     } else {
3284         msg.msg_name = NULL;
3285         msg.msg_namelen = 0;
3286     }
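    /*
     * Size the host control buffer to twice the target length: host cmsg
     * headers and their alignment can be larger than the target's, so the
     * converted messages may grow.
     */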
3287     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3288     msg.msg_control = alloca(msg.msg_controllen);
3289     memset(msg.msg_control, 0, msg.msg_controllen);
3290 
3291     msg.msg_flags = tswap32(msgp->msg_flags);
3292 
3293     count = tswapal(msgp->msg_iovlen);
3294     target_vec = tswapal(msgp->msg_iov);
3295 
3296     if (count > IOV_MAX) {
3297         /* sendmsg/recvmsg return a different errno for this condition than
3298          * readv/writev, so we must catch it here before lock_iovec() does.
3299          */
3300         ret = -TARGET_EMSGSIZE;
3301         goto out2;
3302     }
3303 
3304     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3305                      target_vec, count, send);
3306     if (vec == NULL) {
3307         ret = -host_to_target_errno(errno);
3308         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3309         if (!send || ret) {
3310             goto out2;
3311         }
3312     }
3313     msg.msg_iovlen = count;
3314     msg.msg_iov = vec;
3315 
3316     if (send) {
3317         if (fd_trans_target_to_host_data(fd)) {
3318             void *host_msg;
3319 
3320             host_msg = g_malloc(msg.msg_iov->iov_len);
3321             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3322             ret = fd_trans_target_to_host_data(fd)(host_msg,
3323                                                    msg.msg_iov->iov_len);
3324             if (ret >= 0) {
3325                 msg.msg_iov->iov_base = host_msg;
3326                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3327             }
3328             g_free(host_msg);
3329         } else {
3330             ret = target_to_host_cmsg(&msg, msgp);
3331             if (ret == 0) {
3332                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3333             }
3334         }
3335     } else {
3336         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3337         if (!is_error(ret)) {
3338             len = ret;
3339             if (fd_trans_host_to_target_data(fd)) {
3340                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3341                                                MIN(msg.msg_iov->iov_len, len));
3342             }
3343             if (!is_error(ret)) {
3344                 ret = host_to_target_cmsg(msgp, &msg);
3345             }
3346             if (!is_error(ret)) {
3347                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3348                 msgp->msg_flags = tswap32(msg.msg_flags);
3349                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3350                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3351                                     msg.msg_name, msg.msg_namelen);
3352                     if (ret) {
3353                         goto out;
3354                     }
3355                 }
3356 
3357                 ret = len;
3358             }
3359         }
3360     }
3361 
3362 out:
3363     if (vec) {
3364         unlock_iovec(vec, target_vec, count, !send);
3365     }
3366 out2:
3367     return ret;
3368 }
3369 
3370 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3371                                int flags, int send)
3372 {
3373     abi_long ret;
3374     struct target_msghdr *msgp;
3375 
3376     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3377                           msgp,
3378                           target_msg,
3379                           send ? 1 : 0)) {
3380         return -TARGET_EFAULT;
3381     }
3382     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3383     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3384     return ret;
3385 }
3386 
3387 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3388  * so it might not have this *mmsg-specific flag either.
3389  */
3390 #ifndef MSG_WAITFORONE
3391 #define MSG_WAITFORONE 0x10000
3392 #endif
3393 
3394 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3395                                 unsigned int vlen, unsigned int flags,
3396                                 int send)
3397 {
3398     struct target_mmsghdr *mmsgp;
3399     abi_long ret = 0;
3400     int i;
3401 
3402     if (vlen > UIO_MAXIOV) {
3403         vlen = UIO_MAXIOV;
3404     }
3405 
3406     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3407     if (!mmsgp) {
3408         return -TARGET_EFAULT;
3409     }
3410 
3411     for (i = 0; i < vlen; i++) {
3412         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3413         if (is_error(ret)) {
3414             break;
3415         }
3416         mmsgp[i].msg_len = tswap32(ret);
3417         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3418         if (flags & MSG_WAITFORONE) {
3419             flags |= MSG_DONTWAIT;
3420         }
3421     }
3422 
3423     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3424 
3425     /* Return number of datagrams sent if we sent any at all;
3426      * otherwise return the error.
3427      */
3428     if (i) {
3429         return i;
3430     }
3431     return ret;
3432 }
3433 
3434 /* do_accept4() Must return target values and target errnos. */
3435 static abi_long do_accept4(int fd, abi_ulong target_addr,
3436                            abi_ulong target_addrlen_addr, int flags)
3437 {
3438     socklen_t addrlen, ret_addrlen;
3439     void *addr;
3440     abi_long ret;
3441     int host_flags;
3442 
3443     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3444 
3445     if (target_addr == 0) {
3446         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3447     }
3448 
3449     /* Linux returns EFAULT if the addrlen pointer is invalid */
3450     if (get_user_u32(addrlen, target_addrlen_addr))
3451         return -TARGET_EFAULT;
3452 
3453     if ((int)addrlen < 0) {
3454         return -TARGET_EINVAL;
3455     }
3456 
3457     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3458         return -TARGET_EFAULT;
3459     }
3460 
3461     addr = alloca(addrlen);
3462 
3463     ret_addrlen = addrlen;
3464     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3465     if (!is_error(ret)) {
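        /*
         * Like the kernel, copy back at most the buffer size the guest
         * supplied, but report the address's full length via addrlen.
         */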
3466         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3467         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3468             ret = -TARGET_EFAULT;
3469         }
3470     }
3471     return ret;
3472 }
3473 
3474 /* do_getpeername() Must return target values and target errnos. */
3475 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3476                                abi_ulong target_addrlen_addr)
3477 {
3478     socklen_t addrlen, ret_addrlen;
3479     void *addr;
3480     abi_long ret;
3481 
3482     if (get_user_u32(addrlen, target_addrlen_addr))
3483         return -TARGET_EFAULT;
3484 
3485     if ((int)addrlen < 0) {
3486         return -TARGET_EINVAL;
3487     }
3488 
3489     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3490         return -TARGET_EFAULT;
3491     }
3492 
3493     addr = alloca(addrlen);
3494 
3495     ret_addrlen = addrlen;
3496     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3497     if (!is_error(ret)) {
3498         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3499         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3500             ret = -TARGET_EFAULT;
3501         }
3502     }
3503     return ret;
3504 }
3505 
3506 /* do_getsockname() Must return target values and target errnos. */
3507 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3508                                abi_ulong target_addrlen_addr)
3509 {
3510     socklen_t addrlen, ret_addrlen;
3511     void *addr;
3512     abi_long ret;
3513 
3514     if (get_user_u32(addrlen, target_addrlen_addr))
3515         return -TARGET_EFAULT;
3516 
3517     if ((int)addrlen < 0) {
3518         return -TARGET_EINVAL;
3519     }
3520 
3521     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3522         return -TARGET_EFAULT;
3523     }
3524 
3525     addr = alloca(addrlen);
3526 
3527     ret_addrlen = addrlen;
3528     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3529     if (!is_error(ret)) {
3530         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3531         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3532             ret = -TARGET_EFAULT;
3533         }
3534     }
3535     return ret;
3536 }
3537 
3538 /* do_socketpair() Must return target values and target errnos. */
3539 static abi_long do_socketpair(int domain, int type, int protocol,
3540                               abi_ulong target_tab_addr)
3541 {
3542     int tab[2];
3543     abi_long ret;
3544 
3545     target_to_host_sock_type(&type);
3546 
3547     ret = get_errno(socketpair(domain, type, protocol, tab));
3548     if (!is_error(ret)) {
3549         if (put_user_s32(tab[0], target_tab_addr)
3550             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3551             ret = -TARGET_EFAULT;
3552     }
3553     return ret;
3554 }
3555 
3556 /* do_sendto() Must return target values and target errnos. */
3557 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3558                           abi_ulong target_addr, socklen_t addrlen)
3559 {
3560     void *addr;
3561     void *host_msg;
3562     void *copy_msg = NULL;
3563     abi_long ret;
3564 
3565     if ((int)addrlen < 0) {
3566         return -TARGET_EINVAL;
3567     }
3568 
3569     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3570     if (!host_msg)
3571         return -TARGET_EFAULT;
3572     if (fd_trans_target_to_host_data(fd)) {
3573         copy_msg = host_msg;
3574         host_msg = g_malloc(len);
3575         memcpy(host_msg, copy_msg, len);
3576         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3577         if (ret < 0) {
3578             goto fail;
3579         }
3580     }
3581     if (target_addr) {
3582         addr = alloca(addrlen+1);
3583         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3584         if (ret) {
3585             goto fail;
3586         }
3587         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3588     } else {
3589         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3590     }
3591 fail:
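    /*
     * If the data was translated into a scratch buffer, free it and restore
     * the original locked pointer so that unlock_user() releases the guest
     * mapping that was actually locked.
     */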
3592     if (copy_msg) {
3593         g_free(host_msg);
3594         host_msg = copy_msg;
3595     }
3596     unlock_user(host_msg, msg, 0);
3597     return ret;
3598 }
3599 
3600 /* do_recvfrom() Must return target values and target errnos. */
3601 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3602                             abi_ulong target_addr,
3603                             abi_ulong target_addrlen)
3604 {
3605     socklen_t addrlen, ret_addrlen;
3606     void *addr;
3607     void *host_msg;
3608     abi_long ret;
3609 
3610     if (!msg) {
3611         host_msg = NULL;
3612     } else {
3613         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3614         if (!host_msg) {
3615             return -TARGET_EFAULT;
3616         }
3617     }
3618     if (target_addr) {
3619         if (get_user_u32(addrlen, target_addrlen)) {
3620             ret = -TARGET_EFAULT;
3621             goto fail;
3622         }
3623         if ((int)addrlen < 0) {
3624             ret = -TARGET_EINVAL;
3625             goto fail;
3626         }
3627         addr = alloca(addrlen);
3628         ret_addrlen = addrlen;
3629         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3630                                       addr, &ret_addrlen));
3631     } else {
3632         addr = NULL; /* To keep compiler quiet.  */
3633         addrlen = 0; /* To keep compiler quiet.  */
3634         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3635     }
3636     if (!is_error(ret)) {
3637         if (fd_trans_host_to_target_data(fd)) {
3638             abi_long trans;
3639             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3640             if (is_error(trans)) {
3641                 ret = trans;
3642                 goto fail;
3643             }
3644         }
3645         if (target_addr) {
3646             host_to_target_sockaddr(target_addr, addr,
3647                                     MIN(addrlen, ret_addrlen));
3648             if (put_user_u32(ret_addrlen, target_addrlen)) {
3649                 ret = -TARGET_EFAULT;
3650                 goto fail;
3651             }
3652         }
3653         unlock_user(host_msg, msg, len);
3654     } else {
3655 fail:
3656         unlock_user(host_msg, msg, 0);
3657     }
3658     return ret;
3659 }
3660 
3661 #ifdef TARGET_NR_socketcall
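/*
 * On targets that provide it (historically i386 and several others), the
 * guest libc funnels every socket operation through the single
 * socketcall(num, args) syscall, where args points to an array of abi_long
 * arguments in guest memory; nargs[] below records how many to fetch for
 * each operation.
 */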
3662 /* do_socketcall() must return target values and target errnos. */
3663 static abi_long do_socketcall(int num, abi_ulong vptr)
3664 {
3665     static const unsigned nargs[] = { /* number of arguments per operation */
3666         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3667         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3668         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3669         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3670         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3671         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3672         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3673         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3674         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3675         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3676         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3677         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3678         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3679         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3680         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3681         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3682         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3683         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3684         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3685         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3686     };
3687     abi_long a[6]; /* max 6 args */
3688     unsigned i;
3689 
3690     /* check the range of the first argument num */
3691     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3692     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3693         return -TARGET_EINVAL;
3694     }
3695     /* ensure we have space for args */
3696     if (nargs[num] > ARRAY_SIZE(a)) {
3697         return -TARGET_EINVAL;
3698     }
3699     /* collect the arguments in a[] according to nargs[] */
3700     for (i = 0; i < nargs[num]; ++i) {
3701         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3702             return -TARGET_EFAULT;
3703         }
3704     }
3705     /* now when we have the args, invoke the appropriate underlying function */
3706     switch (num) {
3707     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3708         return do_socket(a[0], a[1], a[2]);
3709     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3710         return do_bind(a[0], a[1], a[2]);
3711     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3712         return do_connect(a[0], a[1], a[2]);
3713     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3714         return get_errno(listen(a[0], a[1]));
3715     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3716         return do_accept4(a[0], a[1], a[2], 0);
3717     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3718         return do_getsockname(a[0], a[1], a[2]);
3719     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3720         return do_getpeername(a[0], a[1], a[2]);
3721     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3722         return do_socketpair(a[0], a[1], a[2], a[3]);
3723     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3724         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3725     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3726         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3727     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3728         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3729     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3730         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3731     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3732         return get_errno(shutdown(a[0], a[1]));
3733     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3734         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3735     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3736         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3737     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3738         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3739     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3740         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3741     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3742         return do_accept4(a[0], a[1], a[2], a[3]);
3743     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3744         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3745     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3746         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3747     default:
3748         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3749         return -TARGET_EINVAL;
3750     }
3751 }
3752 #endif
3753 
3754 #define N_SHM_REGIONS	32
3755 
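/*
 * Bookkeeping for shmat() mappings: do_shmdt() needs the segment size in
 * order to clear the page flags for the whole mapped range.
 */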
3756 static struct shm_region {
3757     abi_ulong start;
3758     abi_ulong size;
3759     bool in_use;
3760 } shm_regions[N_SHM_REGIONS];
3761 
3762 #ifndef TARGET_SEMID64_DS
3763 /* asm-generic version of this struct */
3764 struct target_semid64_ds
3765 {
3766   struct target_ipc_perm sem_perm;
3767   abi_ulong sem_otime;
3768 #if TARGET_ABI_BITS == 32
3769   abi_ulong __unused1;
3770 #endif
3771   abi_ulong sem_ctime;
3772 #if TARGET_ABI_BITS == 32
3773   abi_ulong __unused2;
3774 #endif
3775   abi_ulong sem_nsems;
3776   abi_ulong __unused3;
3777   abi_ulong __unused4;
3778 };
3779 #endif
3780 
3781 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3782                                                abi_ulong target_addr)
3783 {
3784     struct target_ipc_perm *target_ip;
3785     struct target_semid64_ds *target_sd;
3786 
3787     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3788         return -TARGET_EFAULT;
3789     target_ip = &(target_sd->sem_perm);
3790     host_ip->__key = tswap32(target_ip->__key);
3791     host_ip->uid = tswap32(target_ip->uid);
3792     host_ip->gid = tswap32(target_ip->gid);
3793     host_ip->cuid = tswap32(target_ip->cuid);
3794     host_ip->cgid = tswap32(target_ip->cgid);
3795 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3796     host_ip->mode = tswap32(target_ip->mode);
3797 #else
3798     host_ip->mode = tswap16(target_ip->mode);
3799 #endif
3800 #if defined(TARGET_PPC)
3801     host_ip->__seq = tswap32(target_ip->__seq);
3802 #else
3803     host_ip->__seq = tswap16(target_ip->__seq);
3804 #endif
3805     unlock_user_struct(target_sd, target_addr, 0);
3806     return 0;
3807 }
3808 
3809 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3810                                                struct ipc_perm *host_ip)
3811 {
3812     struct target_ipc_perm *target_ip;
3813     struct target_semid64_ds *target_sd;
3814 
3815     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3816         return -TARGET_EFAULT;
3817     target_ip = &(target_sd->sem_perm);
3818     target_ip->__key = tswap32(host_ip->__key);
3819     target_ip->uid = tswap32(host_ip->uid);
3820     target_ip->gid = tswap32(host_ip->gid);
3821     target_ip->cuid = tswap32(host_ip->cuid);
3822     target_ip->cgid = tswap32(host_ip->cgid);
3823 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3824     target_ip->mode = tswap32(host_ip->mode);
3825 #else
3826     target_ip->mode = tswap16(host_ip->mode);
3827 #endif
3828 #if defined(TARGET_PPC)
3829     target_ip->__seq = tswap32(host_ip->__seq);
3830 #else
3831     target_ip->__seq = tswap16(host_ip->__seq);
3832 #endif
3833     unlock_user_struct(target_sd, target_addr, 1);
3834     return 0;
3835 }
3836 
3837 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3838                                                abi_ulong target_addr)
3839 {
3840     struct target_semid64_ds *target_sd;
3841 
3842     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3843         return -TARGET_EFAULT;
3844     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3845         return -TARGET_EFAULT;
3846     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3847     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3848     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3849     unlock_user_struct(target_sd, target_addr, 0);
3850     return 0;
3851 }
3852 
3853 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3854                                                struct semid_ds *host_sd)
3855 {
3856     struct target_semid64_ds *target_sd;
3857 
3858     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3859         return -TARGET_EFAULT;
3860     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3861         return -TARGET_EFAULT;
3862     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3863     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3864     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3865     unlock_user_struct(target_sd, target_addr, 1);
3866     return 0;
3867 }
3868 
3869 struct target_seminfo {
3870     int semmap;
3871     int semmni;
3872     int semmns;
3873     int semmnu;
3874     int semmsl;
3875     int semopm;
3876     int semume;
3877     int semusz;
3878     int semvmx;
3879     int semaem;
3880 };
3881 
3882 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3883                                               struct seminfo *host_seminfo)
3884 {
3885     struct target_seminfo *target_seminfo;
3886     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3887         return -TARGET_EFAULT;
3888     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3889     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3890     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3891     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3892     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3893     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3894     __put_user(host_seminfo->semume, &target_seminfo->semume);
3895     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3896     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3897     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3898     unlock_user_struct(target_seminfo, target_addr, 1);
3899     return 0;
3900 }
3901 
3902 union semun {
3903 	int val;
3904 	struct semid_ds *buf;
3905 	unsigned short *array;
3906 	struct seminfo *__buf;
3907 };
3908 
3909 union target_semun {
3910 	int val;
3911 	abi_ulong buf;
3912 	abi_ulong array;
3913 	abi_ulong __buf;
3914 };
3915 
3916 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3917                                                abi_ulong target_addr)
3918 {
3919     int nsems;
3920     unsigned short *array;
3921     union semun semun;
3922     struct semid_ds semid_ds;
3923     int i, ret;
3924 
3925     semun.buf = &semid_ds;
3926 
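    /* Query the semaphore set first to learn how many values to copy. */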
3927     ret = semctl(semid, 0, IPC_STAT, semun);
3928     if (ret == -1)
3929         return get_errno(ret);
3930 
3931     nsems = semid_ds.sem_nsems;
3932 
3933     *host_array = g_try_new(unsigned short, nsems);
3934     if (!*host_array) {
3935         return -TARGET_ENOMEM;
3936     }
3937     array = lock_user(VERIFY_READ, target_addr,
3938                       nsems*sizeof(unsigned short), 1);
3939     if (!array) {
3940         g_free(*host_array);
3941         return -TARGET_EFAULT;
3942     }
3943 
3944     for(i=0; i<nsems; i++) {
3945         __get_user((*host_array)[i], &array[i]);
3946     }
3947     unlock_user(array, target_addr, 0);
3948 
3949     return 0;
3950 }
3951 
3952 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3953                                                unsigned short **host_array)
3954 {
3955     int nsems;
3956     unsigned short *array;
3957     union semun semun;
3958     struct semid_ds semid_ds;
3959     int i, ret;
3960 
3961     semun.buf = &semid_ds;
3962 
3963     ret = semctl(semid, 0, IPC_STAT, semun);
3964     if (ret == -1)
3965         return get_errno(ret);
3966 
3967     nsems = semid_ds.sem_nsems;
3968 
3969     array = lock_user(VERIFY_WRITE, target_addr,
3970                       nsems*sizeof(unsigned short), 0);
3971     if (!array)
3972         return -TARGET_EFAULT;
3973 
3974     for(i=0; i<nsems; i++) {
3975         __put_user((*host_array)[i], &array[i]);
3976     }
3977     g_free(*host_array);
3978     unlock_user(array, target_addr, 1);
3979 
3980     return 0;
3981 }
3982 
3983 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3984                                  abi_ulong target_arg)
3985 {
3986     union target_semun target_su = { .buf = target_arg };
3987     union semun arg;
3988     struct semid_ds dsarg;
3989     unsigned short *array = NULL;
3990     struct seminfo seminfo;
3991     abi_long ret = -TARGET_EINVAL;
3992     abi_long err;
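    /* Strip any version bits (such as IPC_64) OR'ed into the command. */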
3993     cmd &= 0xff;
3994 
3995     switch( cmd ) {
3996 	case GETVAL:
3997 	case SETVAL:
3998             /* In 64 bit cross-endian situations, we will erroneously pick up
3999              * the wrong half of the union for the "val" element.  To rectify
4000              * this, the entire 8-byte structure is byteswapped, followed by
4001 	     * a swap of the 4 byte val field. In other cases, the data is
4002 	     * already in proper host byte order. */
4003 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4004 		target_su.buf = tswapal(target_su.buf);
4005 		arg.val = tswap32(target_su.val);
4006 	    } else {
4007 		arg.val = target_su.val;
4008 	    }
4009             ret = get_errno(semctl(semid, semnum, cmd, arg));
4010             break;
4011 	case GETALL:
4012 	case SETALL:
4013             err = target_to_host_semarray(semid, &array, target_su.array);
4014             if (err)
4015                 return err;
4016             arg.array = array;
4017             ret = get_errno(semctl(semid, semnum, cmd, arg));
4018             err = host_to_target_semarray(semid, target_su.array, &array);
4019             if (err)
4020                 return err;
4021             break;
4022 	case IPC_STAT:
4023 	case IPC_SET:
4024 	case SEM_STAT:
4025             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4026             if (err)
4027                 return err;
4028             arg.buf = &dsarg;
4029             ret = get_errno(semctl(semid, semnum, cmd, arg));
4030             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4031             if (err)
4032                 return err;
4033             break;
4034 	case IPC_INFO:
4035 	case SEM_INFO:
4036             arg.__buf = &seminfo;
4037             ret = get_errno(semctl(semid, semnum, cmd, arg));
4038             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4039             if (err)
4040                 return err;
4041             break;
4042 	case IPC_RMID:
4043 	case GETPID:
4044 	case GETNCNT:
4045 	case GETZCNT:
4046             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4047             break;
4048     }
4049 
4050     return ret;
4051 }
4052 
4053 struct target_sembuf {
4054     unsigned short sem_num;
4055     short sem_op;
4056     short sem_flg;
4057 };
4058 
4059 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4060                                              abi_ulong target_addr,
4061                                              unsigned nsops)
4062 {
4063     struct target_sembuf *target_sembuf;
4064     int i;
4065 
4066     target_sembuf = lock_user(VERIFY_READ, target_addr,
4067                               nsops*sizeof(struct target_sembuf), 1);
4068     if (!target_sembuf)
4069         return -TARGET_EFAULT;
4070 
4071     for(i=0; i<nsops; i++) {
4072         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4073         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4074         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4075     }
4076 
4077     unlock_user(target_sembuf, target_addr, 0);
4078 
4079     return 0;
4080 }
4081 
4082 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4083     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4084 
4085 /*
4086  * This macro is required to handle the s390 variant of sys_ipc, which
4087  * passes the arguments in a different order than the default variant.
4088  */
4089 #ifdef __s390x__
4090 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4091   (__nsops), (__timeout), (__sops)
4092 #else
4093 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4094   (__nsops), 0, (__sops), (__timeout)
4095 #endif
4096 
4097 static inline abi_long do_semtimedop(int semid,
4098                                      abi_long ptr,
4099                                      unsigned nsops,
4100                                      abi_long timeout, bool time64)
4101 {
4102     struct sembuf *sops;
4103     struct timespec ts, *pts = NULL;
4104     abi_long ret;
4105 
4106     if (timeout) {
4107         pts = &ts;
4108         if (time64) {
4109             if (target_to_host_timespec64(pts, timeout)) {
4110                 return -TARGET_EFAULT;
4111             }
4112         } else {
4113             if (target_to_host_timespec(pts, timeout)) {
4114                 return -TARGET_EFAULT;
4115             }
4116         }
4117     }
4118 
4119     if (nsops > TARGET_SEMOPM) {
4120         return -TARGET_E2BIG;
4121     }
4122 
4123     sops = g_new(struct sembuf, nsops);
4124 
4125     if (target_to_host_sembuf(sops, ptr, nsops)) {
4126         g_free(sops);
4127         return -TARGET_EFAULT;
4128     }
4129 
4130     ret = -TARGET_ENOSYS;
4131 #ifdef __NR_semtimedop
4132     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4133 #endif
4134 #ifdef __NR_ipc
4135     if (ret == -TARGET_ENOSYS) {
4136         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4137                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4138     }
4139 #endif
4140     g_free(sops);
4141     return ret;
4142 }
4143 #endif
4144 
4145 struct target_msqid_ds
4146 {
4147     struct target_ipc_perm msg_perm;
4148     abi_ulong msg_stime;
4149 #if TARGET_ABI_BITS == 32
4150     abi_ulong __unused1;
4151 #endif
4152     abi_ulong msg_rtime;
4153 #if TARGET_ABI_BITS == 32
4154     abi_ulong __unused2;
4155 #endif
4156     abi_ulong msg_ctime;
4157 #if TARGET_ABI_BITS == 32
4158     abi_ulong __unused3;
4159 #endif
4160     abi_ulong __msg_cbytes;
4161     abi_ulong msg_qnum;
4162     abi_ulong msg_qbytes;
4163     abi_ulong msg_lspid;
4164     abi_ulong msg_lrpid;
4165     abi_ulong __unused4;
4166     abi_ulong __unused5;
4167 };
4168 
4169 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4170                                                abi_ulong target_addr)
4171 {
4172     struct target_msqid_ds *target_md;
4173 
4174     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4175         return -TARGET_EFAULT;
4176     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4177         return -TARGET_EFAULT;
4178     host_md->msg_stime = tswapal(target_md->msg_stime);
4179     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4180     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4181     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4182     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4183     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4184     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4185     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4186     unlock_user_struct(target_md, target_addr, 0);
4187     return 0;
4188 }
4189 
4190 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4191                                                struct msqid_ds *host_md)
4192 {
4193     struct target_msqid_ds *target_md;
4194 
4195     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4196         return -TARGET_EFAULT;
4197     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4198         return -TARGET_EFAULT;
4199     target_md->msg_stime = tswapal(host_md->msg_stime);
4200     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4201     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4202     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4203     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4204     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4205     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4206     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4207     unlock_user_struct(target_md, target_addr, 1);
4208     return 0;
4209 }
4210 
4211 struct target_msginfo {
4212     int msgpool;
4213     int msgmap;
4214     int msgmax;
4215     int msgmnb;
4216     int msgmni;
4217     int msgssz;
4218     int msgtql;
4219     unsigned short int msgseg;
4220 };
4221 
4222 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4223                                               struct msginfo *host_msginfo)
4224 {
4225     struct target_msginfo *target_msginfo;
4226     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4227         return -TARGET_EFAULT;
4228     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4229     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4230     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4231     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4232     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4233     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4234     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4235     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4236     unlock_user_struct(target_msginfo, target_addr, 1);
4237     return 0;
4238 }
4239 
4240 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4241 {
4242     struct msqid_ds dsarg;
4243     struct msginfo msginfo;
4244     abi_long ret = -TARGET_EINVAL;
4245 
4246     cmd &= 0xff;
4247 
4248     switch (cmd) {
4249     case IPC_STAT:
4250     case IPC_SET:
4251     case MSG_STAT:
4252         if (target_to_host_msqid_ds(&dsarg,ptr))
4253             return -TARGET_EFAULT;
4254         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4255         if (host_to_target_msqid_ds(ptr,&dsarg))
4256             return -TARGET_EFAULT;
4257         break;
4258     case IPC_RMID:
4259         ret = get_errno(msgctl(msgid, cmd, NULL));
4260         break;
4261     case IPC_INFO:
4262     case MSG_INFO:
4263         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4264         if (host_to_target_msginfo(ptr, &msginfo))
4265             return -TARGET_EFAULT;
4266         break;
4267     }
4268 
4269     return ret;
4270 }
4271 
4272 struct target_msgbuf {
4273     abi_long mtype;
4274     char	mtext[1];
4275 };
4276 
4277 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4278                                  ssize_t msgsz, int msgflg)
4279 {
4280     struct target_msgbuf *target_mb;
4281     struct msgbuf *host_mb;
4282     abi_long ret = 0;
4283 
4284     if (msgsz < 0) {
4285         return -TARGET_EINVAL;
4286     }
4287 
4288     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4289         return -TARGET_EFAULT;
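    /*
     * The host msgbuf starts with a 'long mtype' followed by the message
     * text, hence the extra sizeof(long) in the allocation.
     */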
4290     host_mb = g_try_malloc(msgsz + sizeof(long));
4291     if (!host_mb) {
4292         unlock_user_struct(target_mb, msgp, 0);
4293         return -TARGET_ENOMEM;
4294     }
4295     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4296     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4297     ret = -TARGET_ENOSYS;
4298 #ifdef __NR_msgsnd
4299     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4300 #endif
4301 #ifdef __NR_ipc
4302     if (ret == -TARGET_ENOSYS) {
4303 #ifdef __s390x__
4304         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4305                                  host_mb));
4306 #else
4307         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4308                                  host_mb, 0));
4309 #endif
4310     }
4311 #endif
4312     g_free(host_mb);
4313     unlock_user_struct(target_mb, msgp, 0);
4314 
4315     return ret;
4316 }
4317 
4318 #ifdef __NR_ipc
4319 #if defined(__sparc__)
4320 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4321 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4322 #elif defined(__s390x__)
4323 /* The s390 sys_ipc variant has only five parameters.  */
4324 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4325     ((long int[]){(long int)__msgp, __msgtyp})
4326 #else
4327 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4328     ((long int[]){(long int)__msgp, __msgtyp}), 0
4329 #endif
4330 #endif
4331 
4332 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4333                                  ssize_t msgsz, abi_long msgtyp,
4334                                  int msgflg)
4335 {
4336     struct target_msgbuf *target_mb;
4337     char *target_mtext;
4338     struct msgbuf *host_mb;
4339     abi_long ret = 0;
4340 
4341     if (msgsz < 0) {
4342         return -TARGET_EINVAL;
4343     }
4344 
4345     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4346         return -TARGET_EFAULT;
4347 
4348     host_mb = g_try_malloc(msgsz + sizeof(long));
4349     if (!host_mb) {
4350         ret = -TARGET_ENOMEM;
4351         goto end;
4352     }
4353     ret = -TARGET_ENOSYS;
4354 #ifdef __NR_msgrcv
4355     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4356 #endif
4357 #ifdef __NR_ipc
4358     if (ret == -TARGET_ENOSYS) {
4359         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4360                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4361     }
4362 #endif
4363 
4364     if (ret > 0) {
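        /*
         * mtext follows the abi_long mtype field in the target msgbuf, so
         * the text starts sizeof(abi_ulong) bytes past msgp.
         */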
4365         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4366         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4367         if (!target_mtext) {
4368             ret = -TARGET_EFAULT;
4369             goto end;
4370         }
4371         memcpy(target_mb->mtext, host_mb->mtext, ret);
4372         unlock_user(target_mtext, target_mtext_addr, ret);
4373     }
4374 
4375     target_mb->mtype = tswapal(host_mb->mtype);
4376 
4377 end:
4378     if (target_mb)
4379         unlock_user_struct(target_mb, msgp, 1);
4380     g_free(host_mb);
4381     return ret;
4382 }
4383 
4384 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4385                                                abi_ulong target_addr)
4386 {
4387     struct target_shmid_ds *target_sd;
4388 
4389     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4390         return -TARGET_EFAULT;
4391     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4392         return -TARGET_EFAULT;
4393     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4394     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4395     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4396     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4397     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4398     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4399     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4400     unlock_user_struct(target_sd, target_addr, 0);
4401     return 0;
4402 }
4403 
4404 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4405                                                struct shmid_ds *host_sd)
4406 {
4407     struct target_shmid_ds *target_sd;
4408 
4409     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4410         return -TARGET_EFAULT;
4411     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4412         return -TARGET_EFAULT;
4413     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4414     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4415     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4416     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4417     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4418     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4419     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4420     unlock_user_struct(target_sd, target_addr, 1);
4421     return 0;
4422 }
4423 
4424 struct  target_shminfo {
4425     abi_ulong shmmax;
4426     abi_ulong shmmin;
4427     abi_ulong shmmni;
4428     abi_ulong shmseg;
4429     abi_ulong shmall;
4430 };
4431 
4432 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4433                                               struct shminfo *host_shminfo)
4434 {
4435     struct target_shminfo *target_shminfo;
4436     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4437         return -TARGET_EFAULT;
4438     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4439     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4440     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4441     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4442     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4443     unlock_user_struct(target_shminfo, target_addr, 1);
4444     return 0;
4445 }
4446 
4447 struct target_shm_info {
4448     int used_ids;
4449     abi_ulong shm_tot;
4450     abi_ulong shm_rss;
4451     abi_ulong shm_swp;
4452     abi_ulong swap_attempts;
4453     abi_ulong swap_successes;
4454 };
4455 
4456 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4457                                                struct shm_info *host_shm_info)
4458 {
4459     struct target_shm_info *target_shm_info;
4460     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4461         return -TARGET_EFAULT;
4462     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4463     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4464     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4465     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4466     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4467     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4468     unlock_user_struct(target_shm_info, target_addr, 1);
4469     return 0;
4470 }
4471 
4472 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4473 {
4474     struct shmid_ds dsarg;
4475     struct shminfo shminfo;
4476     struct shm_info shm_info;
4477     abi_long ret = -TARGET_EINVAL;
4478 
4479     cmd &= 0xff;
4480 
4481     switch(cmd) {
4482     case IPC_STAT:
4483     case IPC_SET:
4484     case SHM_STAT:
4485         if (target_to_host_shmid_ds(&dsarg, buf))
4486             return -TARGET_EFAULT;
4487         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4488         if (host_to_target_shmid_ds(buf, &dsarg))
4489             return -TARGET_EFAULT;
4490         break;
4491     case IPC_INFO:
4492         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4493         if (host_to_target_shminfo(buf, &shminfo))
4494             return -TARGET_EFAULT;
4495         break;
4496     case SHM_INFO:
4497         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4498         if (host_to_target_shm_info(buf, &shm_info))
4499             return -TARGET_EFAULT;
4500         break;
4501     case IPC_RMID:
4502     case SHM_LOCK:
4503     case SHM_UNLOCK:
4504         ret = get_errno(shmctl(shmid, cmd, NULL));
4505         break;
4506     }
4507 
4508     return ret;
4509 }
4510 
4511 #ifndef TARGET_FORCE_SHMLBA
4512 /* For most architectures, SHMLBA is the same as the page size;
4513  * some architectures have larger values, in which case they should
4514  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4515  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4516  * and defining its own value for SHMLBA.
4517  *
4518  * The kernel also permits SHMLBA to be set by the architecture to a
4519  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4520  * this means that addresses are rounded to the large size if
4521  * SHM_RND is set but addresses not aligned to that size are not rejected
4522  * as long as they are at least page-aligned. Since the only architecture
4523  * which uses this is ia64, this code doesn't provide for that oddity.
4524  */
4525 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4526 {
4527     return TARGET_PAGE_SIZE;
4528 }
4529 #endif
4530 
4531 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4532                                  int shmid, abi_ulong shmaddr, int shmflg)
4533 {
4534     CPUState *cpu = env_cpu(cpu_env);
4535     abi_long raddr;
4536     void *host_raddr;
4537     struct shmid_ds shm_info;
4538     int i,ret;
4539     abi_ulong shmlba;
4540 
4541     /* shmat pointers are always untagged */
4542 
4543     /* find out the length of the shared memory segment */
4544     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4545     if (is_error(ret)) {
4546         /* can't get length, bail out */
4547         return ret;
4548     }
4549 
4550     shmlba = target_shmlba(cpu_env);
4551 
4552     if (shmaddr & (shmlba - 1)) {
4553         if (shmflg & SHM_RND) {
4554             shmaddr &= ~(shmlba - 1);
4555         } else {
4556             return -TARGET_EINVAL;
4557         }
4558     }
4559     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4560         return -TARGET_EINVAL;
4561     }
4562 
4563     mmap_lock();
4564 
4565     /*
4566      * We're mapping shared memory, so ensure we generate code for parallel
4567      * execution and flush old translations.  This will work up to the level
4568      * supported by the host -- anything that requires EXCP_ATOMIC will not
4569      * be atomic with respect to an external process.
4570      */
4571     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4572         cpu->tcg_cflags |= CF_PARALLEL;
4573         tb_flush(cpu);
4574     }
4575 
4576     if (shmaddr)
4577         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4578     else {
4579         abi_ulong mmap_start;
4580 
4581         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4582         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4583 
4584         if (mmap_start == -1) {
4585             errno = ENOMEM;
4586             host_raddr = (void *)-1;
4587         } else
4588             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4589                                shmflg | SHM_REMAP);
4590     }
4591 
4592     if (host_raddr == (void *)-1) {
4593         mmap_unlock();
4594         return get_errno((long)host_raddr);
4595     }
4596     raddr=h2g((unsigned long)host_raddr);
4597 
4598     page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
4599                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4600                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4601 
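    /*
     * Record the new mapping in the first free slot so that do_shmdt() can
     * later look up its size and clear the page flags for the whole segment.
     */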
4602     for (i = 0; i < N_SHM_REGIONS; i++) {
4603         if (!shm_regions[i].in_use) {
4604             shm_regions[i].in_use = true;
4605             shm_regions[i].start = raddr;
4606             shm_regions[i].size = shm_info.shm_segsz;
4607             break;
4608         }
4609     }
4610 
4611     mmap_unlock();
4612     return raddr;
4613 
4614 }
4615 
4616 static inline abi_long do_shmdt(abi_ulong shmaddr)
4617 {
4618     int i;
4619     abi_long rv;
4620 
4621     /* shmdt pointers are always untagged */
4622 
4623     mmap_lock();
4624 
4625     for (i = 0; i < N_SHM_REGIONS; ++i) {
4626         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4627             shm_regions[i].in_use = false;
4628             page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4629             break;
4630         }
4631     }
4632     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4633 
4634     mmap_unlock();
4635 
4636     return rv;
4637 }
4638 
4639 #ifdef TARGET_NR_ipc
4640 /* ??? This only works with linear mappings.  */
4641 /* do_ipc() must return target values and target errnos. */
4642 static abi_long do_ipc(CPUArchState *cpu_env,
4643                        unsigned int call, abi_long first,
4644                        abi_long second, abi_long third,
4645                        abi_long ptr, abi_long fifth)
4646 {
4647     int version;
4648     abi_long ret = 0;
4649 
4650     version = call >> 16;
4651     call &= 0xffff;
4652 
4653     switch (call) {
4654     case IPCOP_semop:
4655         ret = do_semtimedop(first, ptr, second, 0, false);
4656         break;
4657     case IPCOP_semtimedop:
4658     /*
4659      * The s390 sys_ipc variant has only five parameters instead of six
4660      * (as in the default variant), and the only difference is the handling of
4661      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4662      * to a struct timespec while the generic variant uses the fifth parameter.
4663      */
4664 #if defined(TARGET_S390X)
4665         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4666 #else
4667         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4668 #endif
4669         break;
4670 
4671     case IPCOP_semget:
4672         ret = get_errno(semget(first, second, third));
4673         break;
4674 
4675     case IPCOP_semctl: {
4676         /* The semun argument to semctl is passed by value, so dereference the
4677          * ptr argument. */
4678         abi_ulong atptr;
4679         get_user_ual(atptr, ptr);
4680         ret = do_semctl(first, second, third, atptr);
4681         break;
4682     }
4683 
4684     case IPCOP_msgget:
4685         ret = get_errno(msgget(first, second));
4686         break;
4687 
4688     case IPCOP_msgsnd:
4689         ret = do_msgsnd(first, ptr, second, third);
4690         break;
4691 
4692     case IPCOP_msgctl:
4693         ret = do_msgctl(first, second, ptr);
4694         break;
4695 
4696     case IPCOP_msgrcv:
4697         switch (version) {
4698         case 0:
4699             {
4700                 struct target_ipc_kludge {
4701                     abi_long msgp;
4702                     abi_long msgtyp;
4703                 } *tmp;
4704 
4705                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4706                     ret = -TARGET_EFAULT;
4707                     break;
4708                 }
4709 
4710                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4711 
4712                 unlock_user_struct(tmp, ptr, 0);
4713                 break;
4714             }
4715         default:
4716             ret = do_msgrcv(first, ptr, second, fifth, third);
4717         }
4718         break;
4719 
4720     case IPCOP_shmat:
4721         switch (version) {
4722         default:
4723         {
4724             abi_ulong raddr;
4725             raddr = do_shmat(cpu_env, first, ptr, second);
4726             if (is_error(raddr))
4727                 return get_errno(raddr);
4728             if (put_user_ual(raddr, third))
4729                 return -TARGET_EFAULT;
4730             break;
4731         }
4732         case 1:
4733             ret = -TARGET_EINVAL;
4734             break;
4735         }
4736         break;
4737     case IPCOP_shmdt:
4738         ret = do_shmdt(ptr);
4739         break;
4740 
4741     case IPCOP_shmget:
4742         /* IPC_* flag values are the same on all Linux platforms */
4743         ret = get_errno(shmget(first, second, third));
4744         break;
4745 
4746     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4747     case IPCOP_shmctl:
4748         ret = do_shmctl(first, second, ptr);
4749         break;
4750     default:
4751         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4752                       call, version);
4753         ret = -TARGET_ENOSYS;
4754         break;
4755     }
4756     return ret;
4757 }
4758 #endif
4759 
4760 /* kernel structure types definitions */
4761 
4762 #define STRUCT(name, ...) STRUCT_ ## name,
4763 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4764 enum {
4765 #include "syscall_types.h"
4766 STRUCT_MAX
4767 };
4768 #undef STRUCT
4769 #undef STRUCT_SPECIAL
4770 
4771 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4772 #define STRUCT_SPECIAL(name)
4773 #include "syscall_types.h"
4774 #undef STRUCT
4775 #undef STRUCT_SPECIAL
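/*
 * Each STRUCT() line in syscall_types.h is expanded twice by the macros
 * above -- once into a STRUCT_<name> enum value and once into a
 * TYPE_NULL-terminated argtype array.  For example, a hypothetical
 * STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT) entry
 * would produce the enum member STRUCT_winsize and
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 */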
4776 
4777 #define MAX_STRUCT_SIZE 4096
4778 
4779 #ifdef CONFIG_FIEMAP
4780 /* So fiemap access checks don't overflow on 32 bit systems.
4781  * This is very slightly smaller than the limit imposed by
4782  * the underlying kernel.
4783  */
4784 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4785                             / sizeof(struct fiemap_extent))
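/*
 * The handler below computes
 *     outbufsz = sizeof(struct fiemap)
 *                + sizeof(struct fiemap_extent) * fm->fm_extent_count;
 * capping fm_extent_count at FIEMAP_MAX_EXTENTS guarantees that this sum
 * cannot wrap around an unsigned 32-bit size on 32-bit hosts.
 */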
4786 
4787 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4788                                        int fd, int cmd, abi_long arg)
4789 {
4790     /* The parameter for this ioctl is a struct fiemap followed
4791      * by an array of struct fiemap_extent whose size is set
4792      * in fiemap->fm_extent_count. The array is filled in by the
4793      * ioctl.
4794      */
4795     int target_size_in, target_size_out;
4796     struct fiemap *fm;
4797     const argtype *arg_type = ie->arg_type;
4798     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4799     void *argptr, *p;
4800     abi_long ret;
4801     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4802     uint32_t outbufsz;
4803     int free_fm = 0;
4804 
4805     assert(arg_type[0] == TYPE_PTR);
4806     assert(ie->access == IOC_RW);
4807     arg_type++;
4808     target_size_in = thunk_type_size(arg_type, 0);
4809     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4810     if (!argptr) {
4811         return -TARGET_EFAULT;
4812     }
4813     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4814     unlock_user(argptr, arg, 0);
4815     fm = (struct fiemap *)buf_temp;
4816     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4817         return -TARGET_EINVAL;
4818     }
4819 
4820     outbufsz = sizeof (*fm) +
4821         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4822 
4823     if (outbufsz > MAX_STRUCT_SIZE) {
4824         /* We can't fit all the extents into the fixed size buffer.
4825          * Allocate one that is large enough and use it instead.
4826          */
4827         fm = g_try_malloc(outbufsz);
4828         if (!fm) {
4829             return -TARGET_ENOMEM;
4830         }
4831         memcpy(fm, buf_temp, sizeof(struct fiemap));
4832         free_fm = 1;
4833     }
4834     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4835     if (!is_error(ret)) {
4836         target_size_out = target_size_in;
4837         /* An extent_count of 0 means we were only counting the extents
4838          * so there are no structs to copy
4839          */
4840         if (fm->fm_extent_count != 0) {
4841             target_size_out += fm->fm_mapped_extents * extent_size;
4842         }
4843         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4844         if (!argptr) {
4845             ret = -TARGET_EFAULT;
4846         } else {
4847             /* Convert the struct fiemap */
4848             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4849             if (fm->fm_extent_count != 0) {
4850                 p = argptr + target_size_in;
4851                 /* ...and then all the struct fiemap_extents */
4852                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4853                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4854                                   THUNK_TARGET);
4855                     p += extent_size;
4856                 }
4857             }
4858             unlock_user(argptr, arg, target_size_out);
4859         }
4860     }
4861     if (free_fm) {
4862         g_free(fm);
4863     }
4864     return ret;
4865 }
4866 #endif
4867 
4868 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4869                                 int fd, int cmd, abi_long arg)
4870 {
4871     const argtype *arg_type = ie->arg_type;
4872     int target_size;
4873     void *argptr;
4874     int ret;
4875     struct ifconf *host_ifconf;
4876     uint32_t outbufsz;
4877     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4878     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4879     int target_ifreq_size;
4880     int nb_ifreq;
4881     int free_buf = 0;
4882     int i;
4883     int target_ifc_len;
4884     abi_long target_ifc_buf;
4885     int host_ifc_len;
4886     char *host_ifc_buf;
4887 
4888     assert(arg_type[0] == TYPE_PTR);
4889     assert(ie->access == IOC_RW);
4890 
4891     arg_type++;
4892     target_size = thunk_type_size(arg_type, 0);
4893 
4894     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4895     if (!argptr)
4896         return -TARGET_EFAULT;
4897     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4898     unlock_user(argptr, arg, 0);
4899 
4900     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4901     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
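    /*
     * Note: ifreq_max_type (the ifmap flavour of ifreq) appears to be used
     * only to obtain the full size of the target ifreq union, i.e. the stride
     * of the guest's ifreq array, while ifreq_arg_type converts the
     * name + sockaddr data that SIOCGIFCONF actually returns.
     */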
4902     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4903 
4904     if (target_ifc_buf != 0) {
4905         target_ifc_len = host_ifconf->ifc_len;
4906         nb_ifreq = target_ifc_len / target_ifreq_size;
4907         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4908 
4909         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4910         if (outbufsz > MAX_STRUCT_SIZE) {
4911             /*
4912              * We can't fit all the ifreq entries into the fixed-size buffer.
4913              * Allocate one that is large enough and use it instead.
4914              */
4915             host_ifconf = g_try_malloc(outbufsz);
4916             if (!host_ifconf) {
4917                 return -TARGET_ENOMEM;
4918             }
4919             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4920             free_buf = 1;
4921         }
4922         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4923 
4924         host_ifconf->ifc_len = host_ifc_len;
4925     } else {
4926       host_ifc_buf = NULL;
4927     }
4928     host_ifconf->ifc_buf = host_ifc_buf;
4929 
4930     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4931     if (!is_error(ret)) {
4932         /* convert host ifc_len to target ifc_len */
4933 
4934         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4935         target_ifc_len = nb_ifreq * target_ifreq_size;
4936         host_ifconf->ifc_len = target_ifc_len;
4937 
4938         /* restore target ifc_buf */
4939 
4940         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4941 
4942         /* copy struct ifconf to target user */
4943 
4944         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4945         if (!argptr)
4946             return -TARGET_EFAULT;
4947         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4948         unlock_user(argptr, arg, target_size);
4949 
4950         if (target_ifc_buf != 0) {
4951             /* copy ifreq[] to target user */
4952             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4953             for (i = 0; i < nb_ifreq ; i++) {
4954                 thunk_convert(argptr + i * target_ifreq_size,
4955                               host_ifc_buf + i * sizeof(struct ifreq),
4956                               ifreq_arg_type, THUNK_TARGET);
4957             }
4958             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4959         }
4960     }
4961 
4962     if (free_buf) {
4963         g_free(host_ifconf);
4964     }
4965 
4966     return ret;
4967 }
4968 
4969 #if defined(CONFIG_USBFS)
4970 #if HOST_LONG_BITS > 64
4971 #error USBDEVFS thunks do not support >64 bit hosts yet.
4972 #endif
4973 struct live_urb {
4974     uint64_t target_urb_adr;
4975     uint64_t target_buf_adr;
4976     char *target_buf_ptr;
4977     struct usbdevfs_urb host_urb;
4978 };
4979 
4980 static GHashTable *usbdevfs_urb_hashtable(void)
4981 {
4982     static GHashTable *urb_hashtable;
4983 
4984     if (!urb_hashtable) {
4985         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4986     }
4987     return urb_hashtable;
4988 }
4989 
4990 static void urb_hashtable_insert(struct live_urb *urb)
4991 {
4992     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4993     g_hash_table_insert(urb_hashtable, urb, urb);
4994 }
4995 
4996 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4997 {
4998     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4999     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5000 }
5001 
5002 static void urb_hashtable_remove(struct live_urb *urb)
5003 {
5004     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5005     g_hash_table_remove(urb_hashtable, urb);
5006 }
5007 
5008 static abi_long
5009 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5010                           int fd, int cmd, abi_long arg)
5011 {
5012     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5013     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5014     struct live_urb *lurb;
5015     void *argptr;
5016     uint64_t hurb;
5017     int target_size;
5018     uintptr_t target_urb_adr;
5019     abi_long ret;
5020 
5021     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5022 
5023     memset(buf_temp, 0, sizeof(uint64_t));
5024     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5025     if (is_error(ret)) {
5026         return ret;
5027     }
5028 
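    /*
     * buf_temp now holds the host urb pointer reaped by the kernel.  Since
     * host_urb is embedded in struct live_urb, subtracting its offset
     * recovers the enclosing wrapper (a container_of-style idiom).
     */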
5029     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5030     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5031     if (!lurb->target_urb_adr) {
5032         return -TARGET_EFAULT;
5033     }
5034     urb_hashtable_remove(lurb);
5035     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5036         lurb->host_urb.buffer_length);
5037     lurb->target_buf_ptr = NULL;
5038 
5039     /* restore the guest buffer pointer */
5040     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5041 
5042     /* update the guest urb struct */
5043     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5044     if (!argptr) {
5045         g_free(lurb);
5046         return -TARGET_EFAULT;
5047     }
5048     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5049     unlock_user(argptr, lurb->target_urb_adr, target_size);
5050 
5051     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5052     /* write back the urb handle */
5053     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5054     if (!argptr) {
5055         g_free(lurb);
5056         return -TARGET_EFAULT;
5057     }
5058 
5059     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5060     target_urb_adr = lurb->target_urb_adr;
5061     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5062     unlock_user(argptr, arg, target_size);
5063 
5064     g_free(lurb);
5065     return ret;
5066 }
5067 
5068 static abi_long
5069 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5070                              uint8_t *buf_temp __attribute__((unused)),
5071                              int fd, int cmd, abi_long arg)
5072 {
5073     struct live_urb *lurb;
5074 
5075     /* map target address back to host URB with metadata. */
5076     lurb = urb_hashtable_lookup(arg);
5077     if (!lurb) {
5078         return -TARGET_EFAULT;
5079     }
5080     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5081 }
5082 
5083 static abi_long
5084 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5085                             int fd, int cmd, abi_long arg)
5086 {
5087     const argtype *arg_type = ie->arg_type;
5088     int target_size;
5089     abi_long ret;
5090     void *argptr;
5091     int rw_dir;
5092     struct live_urb *lurb;
5093 
5094     /*
5095      * Each submitted URB needs to map to a unique ID for the
5096      * kernel, and that unique ID needs to be a pointer to
5097      * host memory.  Hence, we need to malloc for each URB.
5098      * Isochronous transfers have a variable-length struct.
5099      */
5100     arg_type++;
5101     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5102 
5103     /* construct host copy of urb and metadata */
5104     lurb = g_try_new0(struct live_urb, 1);
5105     if (!lurb) {
5106         return -TARGET_ENOMEM;
5107     }
5108 
5109     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5110     if (!argptr) {
5111         g_free(lurb);
5112         return -TARGET_EFAULT;
5113     }
5114     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5115     unlock_user(argptr, arg, 0);
5116 
5117     lurb->target_urb_adr = arg;
5118     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5119 
5120     /* Buffer space used depends on the endpoint type, so lock the entire buffer. */
5121     /* Control-type URBs should check the buffer contents for the true direction. */
5122     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5123     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5124         lurb->host_urb.buffer_length, 1);
5125     if (lurb->target_buf_ptr == NULL) {
5126         g_free(lurb);
5127         return -TARGET_EFAULT;
5128     }
5129 
5130     /* update buffer pointer in host copy */
5131     lurb->host_urb.buffer = lurb->target_buf_ptr;
5132 
5133     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5134     if (is_error(ret)) {
5135         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5136         g_free(lurb);
5137     } else {
5138         urb_hashtable_insert(lurb);
5139     }
5140 
5141     return ret;
5142 }
5143 #endif /* CONFIG_USBFS */
5144 
5145 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5146                             int cmd, abi_long arg)
5147 {
5148     void *argptr;
5149     struct dm_ioctl *host_dm;
5150     abi_long guest_data;
5151     uint32_t guest_data_size;
5152     int target_size;
5153     const argtype *arg_type = ie->arg_type;
5154     abi_long ret;
5155     void *big_buf = NULL;
5156     char *host_data;
5157 
5158     arg_type++;
5159     target_size = thunk_type_size(arg_type, 0);
5160     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5161     if (!argptr) {
5162         ret = -TARGET_EFAULT;
5163         goto out;
5164     }
5165     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5166     unlock_user(argptr, arg, 0);
5167 
5168     /* buf_temp is too small, so fetch things into a bigger buffer */
5169     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5170     memcpy(big_buf, buf_temp, target_size);
5171     buf_temp = big_buf;
5172     host_dm = big_buf;
5173 
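    /*
     * A dm_ioctl buffer is a fixed header followed by command-specific
     * payload in the same allocation: data_start is the payload offset from
     * the start of the header and data_size the total size, so
     * guest_data/host_data below address the variable part on the guest and
     * host side respectively.
     */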
5174     guest_data = arg + host_dm->data_start;
5175     if ((guest_data - arg) < 0) {
5176         ret = -TARGET_EINVAL;
5177         goto out;
5178     }
5179     guest_data_size = host_dm->data_size - host_dm->data_start;
5180     host_data = (char*)host_dm + host_dm->data_start;
5181 
5182     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5183     if (!argptr) {
5184         ret = -TARGET_EFAULT;
5185         goto out;
5186     }
5187 
5188     switch (ie->host_cmd) {
5189     case DM_REMOVE_ALL:
5190     case DM_LIST_DEVICES:
5191     case DM_DEV_CREATE:
5192     case DM_DEV_REMOVE:
5193     case DM_DEV_SUSPEND:
5194     case DM_DEV_STATUS:
5195     case DM_DEV_WAIT:
5196     case DM_TABLE_STATUS:
5197     case DM_TABLE_CLEAR:
5198     case DM_TABLE_DEPS:
5199     case DM_LIST_VERSIONS:
5200         /* no input data */
5201         break;
5202     case DM_DEV_RENAME:
5203     case DM_DEV_SET_GEOMETRY:
5204         /* data contains only strings */
5205         memcpy(host_data, argptr, guest_data_size);
5206         break;
5207     case DM_TARGET_MSG:
5208         memcpy(host_data, argptr, guest_data_size);
5209         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5210         break;
5211     case DM_TABLE_LOAD:
5212     {
5213         void *gspec = argptr;
5214         void *cur_data = host_data;
5215         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5216         int spec_size = thunk_type_size(arg_type, 0);
5217         int i;
5218 
5219         for (i = 0; i < host_dm->target_count; i++) {
5220             struct dm_target_spec *spec = cur_data;
5221             uint32_t next;
5222             int slen;
5223 
5224             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5225             slen = strlen((char*)gspec + spec_size) + 1;
5226             next = spec->next;
5227             spec->next = sizeof(*spec) + slen;
5228             strcpy((char*)&spec[1], gspec + spec_size);
5229             gspec += next;
5230             cur_data += spec->next;
5231         }
5232         break;
5233     }
5234     default:
5235         ret = -TARGET_EINVAL;
5236         unlock_user(argptr, guest_data, 0);
5237         goto out;
5238     }
5239     unlock_user(argptr, guest_data, 0);
5240 
5241     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5242     if (!is_error(ret)) {
5243         guest_data = arg + host_dm->data_start;
5244         guest_data_size = host_dm->data_size - host_dm->data_start;
5245         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5246         switch (ie->host_cmd) {
5247         case DM_REMOVE_ALL:
5248         case DM_DEV_CREATE:
5249         case DM_DEV_REMOVE:
5250         case DM_DEV_RENAME:
5251         case DM_DEV_SUSPEND:
5252         case DM_DEV_STATUS:
5253         case DM_TABLE_LOAD:
5254         case DM_TABLE_CLEAR:
5255         case DM_TARGET_MSG:
5256         case DM_DEV_SET_GEOMETRY:
5257             /* no return data */
5258             break;
5259         case DM_LIST_DEVICES:
5260         {
5261             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5262             uint32_t remaining_data = guest_data_size;
5263             void *cur_data = argptr;
5264             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5265             int nl_size = 12; /* can't use thunk_size due to alignment */
5266 
5267             while (1) {
5268                 uint32_t next = nl->next;
5269                 if (next) {
5270                     nl->next = nl_size + (strlen(nl->name) + 1);
5271                 }
5272                 if (remaining_data < nl->next) {
5273                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5274                     break;
5275                 }
5276                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5277                 strcpy(cur_data + nl_size, nl->name);
5278                 cur_data += nl->next;
5279                 remaining_data -= nl->next;
5280                 if (!next) {
5281                     break;
5282                 }
5283                 nl = (void*)nl + next;
5284             }
5285             break;
5286         }
5287         case DM_DEV_WAIT:
5288         case DM_TABLE_STATUS:
5289         {
5290             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5291             void *cur_data = argptr;
5292             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5293             int spec_size = thunk_type_size(arg_type, 0);
5294             int i;
5295 
5296             for (i = 0; i < host_dm->target_count; i++) {
5297                 uint32_t next = spec->next;
5298                 int slen = strlen((char*)&spec[1]) + 1;
5299                 spec->next = (cur_data - argptr) + spec_size + slen;
5300                 if (guest_data_size < spec->next) {
5301                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5302                     break;
5303                 }
5304                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5305                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5306                 cur_data = argptr + spec->next;
5307                 spec = (void*)host_dm + host_dm->data_start + next;
5308             }
5309             break;
5310         }
5311         case DM_TABLE_DEPS:
5312         {
5313             void *hdata = (void*)host_dm + host_dm->data_start;
5314             int count = *(uint32_t*)hdata;
5315             uint64_t *hdev = hdata + 8;
5316             uint64_t *gdev = argptr + 8;
5317             int i;
5318 
5319             *(uint32_t*)argptr = tswap32(count);
5320             for (i = 0; i < count; i++) {
5321                 *gdev = tswap64(*hdev);
5322                 gdev++;
5323                 hdev++;
5324             }
5325             break;
5326         }
5327         case DM_LIST_VERSIONS:
5328         {
5329             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5330             uint32_t remaining_data = guest_data_size;
5331             void *cur_data = argptr;
5332             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5333             int vers_size = thunk_type_size(arg_type, 0);
5334 
5335             while (1) {
5336                 uint32_t next = vers->next;
5337                 if (next) {
5338                     vers->next = vers_size + (strlen(vers->name) + 1);
5339                 }
5340                 if (remaining_data < vers->next) {
5341                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5342                     break;
5343                 }
5344                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5345                 strcpy(cur_data + vers_size, vers->name);
5346                 cur_data += vers->next;
5347                 remaining_data -= vers->next;
5348                 if (!next) {
5349                     break;
5350                 }
5351                 vers = (void*)vers + next;
5352             }
5353             break;
5354         }
5355         default:
5356             unlock_user(argptr, guest_data, 0);
5357             ret = -TARGET_EINVAL;
5358             goto out;
5359         }
5360         unlock_user(argptr, guest_data, guest_data_size);
5361 
5362         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5363         if (!argptr) {
5364             ret = -TARGET_EFAULT;
5365             goto out;
5366         }
5367         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5368         unlock_user(argptr, arg, target_size);
5369     }
5370 out:
5371     g_free(big_buf);
5372     return ret;
5373 }
5374 
5375 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5376                                int cmd, abi_long arg)
5377 {
5378     void *argptr;
5379     int target_size;
5380     const argtype *arg_type = ie->arg_type;
5381     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5382     abi_long ret;
5383 
5384     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5385     struct blkpg_partition host_part;
5386 
5387     /* Read and convert blkpg */
5388     arg_type++;
5389     target_size = thunk_type_size(arg_type, 0);
5390     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5391     if (!argptr) {
5392         ret = -TARGET_EFAULT;
5393         goto out;
5394     }
5395     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5396     unlock_user(argptr, arg, 0);
5397 
5398     switch (host_blkpg->op) {
5399     case BLKPG_ADD_PARTITION:
5400     case BLKPG_DEL_PARTITION:
5401         /* payload is struct blkpg_partition */
5402         break;
5403     default:
5404         /* Unknown opcode */
5405         ret = -TARGET_EINVAL;
5406         goto out;
5407     }
5408 
5409     /* Read and convert blkpg->data */
5410     arg = (abi_long)(uintptr_t)host_blkpg->data;
5411     target_size = thunk_type_size(part_arg_type, 0);
5412     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5413     if (!argptr) {
5414         ret = -TARGET_EFAULT;
5415         goto out;
5416     }
5417     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5418     unlock_user(argptr, arg, 0);
5419 
5420     /* Swizzle the data pointer to our local copy and call! */
5421     host_blkpg->data = &host_part;
5422     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5423 
5424 out:
5425     return ret;
5426 }
5427 
5428 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5429                                 int fd, int cmd, abi_long arg)
5430 {
5431     const argtype *arg_type = ie->arg_type;
5432     const StructEntry *se;
5433     const argtype *field_types;
5434     const int *dst_offsets, *src_offsets;
5435     int target_size;
5436     void *argptr;
5437     abi_ulong *target_rt_dev_ptr = NULL;
5438     unsigned long *host_rt_dev_ptr = NULL;
5439     abi_long ret;
5440     int i;
5441 
5442     assert(ie->access == IOC_W);
5443     assert(*arg_type == TYPE_PTR);
5444     arg_type++;
5445     assert(*arg_type == TYPE_STRUCT);
5446     target_size = thunk_type_size(arg_type, 0);
5447     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5448     if (!argptr) {
5449         return -TARGET_EFAULT;
5450     }
5451     arg_type++;
5452     assert(*arg_type == (int)STRUCT_rtentry);
5453     se = struct_entries + *arg_type++;
5454     assert(se->convert[0] == NULL);
5455     /* convert struct here to be able to catch rt_dev string */
5456     field_types = se->field_types;
5457     dst_offsets = se->field_offsets[THUNK_HOST];
5458     src_offsets = se->field_offsets[THUNK_TARGET];
5459     for (i = 0; i < se->nb_fields; i++) {
5460         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5461             assert(*field_types == TYPE_PTRVOID);
5462             target_rt_dev_ptr = argptr + src_offsets[i];
5463             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5464             if (*target_rt_dev_ptr != 0) {
5465                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5466                                                   tswapal(*target_rt_dev_ptr));
5467                 if (!*host_rt_dev_ptr) {
5468                     unlock_user(argptr, arg, 0);
5469                     return -TARGET_EFAULT;
5470                 }
5471             } else {
5472                 *host_rt_dev_ptr = 0;
5473             }
5474             field_types++;
5475             continue;
5476         }
5477         field_types = thunk_convert(buf_temp + dst_offsets[i],
5478                                     argptr + src_offsets[i],
5479                                     field_types, THUNK_HOST);
5480     }
5481     unlock_user(argptr, arg, 0);
5482 
5483     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5484 
5485     assert(host_rt_dev_ptr != NULL);
5486     assert(target_rt_dev_ptr != NULL);
5487     if (*host_rt_dev_ptr != 0) {
5488         unlock_user((void *)*host_rt_dev_ptr,
5489                     *target_rt_dev_ptr, 0);
5490     }
5491     return ret;
5492 }
5493 
5494 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5495                                      int fd, int cmd, abi_long arg)
5496 {
5497     int sig = target_to_host_signal(arg);
5498     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5499 }
5500 
5501 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5502                                     int fd, int cmd, abi_long arg)
5503 {
5504     struct timeval tv;
5505     abi_long ret;
5506 
5507     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5508     if (is_error(ret)) {
5509         return ret;
5510     }
5511 
5512     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5513         if (copy_to_user_timeval(arg, &tv)) {
5514             return -TARGET_EFAULT;
5515         }
5516     } else {
5517         if (copy_to_user_timeval64(arg, &tv)) {
5518             return -TARGET_EFAULT;
5519         }
5520     }
5521 
5522     return ret;
5523 }
5524 
5525 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5526                                       int fd, int cmd, abi_long arg)
5527 {
5528     struct timespec ts;
5529     abi_long ret;
5530 
5531     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5532     if (is_error(ret)) {
5533         return ret;
5534     }
5535 
5536     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5537         if (host_to_target_timespec(arg, &ts)) {
5538             return -TARGET_EFAULT;
5539         }
5540     } else {
5541         if (host_to_target_timespec64(arg, &ts)) {
5542             return -TARGET_EFAULT;
5543         }
5544     }
5545 
5546     return ret;
5547 }
5548 
5549 #ifdef TIOCGPTPEER
5550 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5551                                      int fd, int cmd, abi_long arg)
5552 {
5553     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5554     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5555 }
5556 #endif
5557 
5558 #ifdef HAVE_DRM_H
5559 
5560 static void unlock_drm_version(struct drm_version *host_ver,
5561                                struct target_drm_version *target_ver,
5562                                bool copy)
5563 {
5564     unlock_user(host_ver->name, target_ver->name,
5565                                 copy ? host_ver->name_len : 0);
5566     unlock_user(host_ver->date, target_ver->date,
5567                                 copy ? host_ver->date_len : 0);
5568     unlock_user(host_ver->desc, target_ver->desc,
5569                                 copy ? host_ver->desc_len : 0);
5570 }
5571 
5572 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5573                                           struct target_drm_version *target_ver)
5574 {
5575     memset(host_ver, 0, sizeof(*host_ver));
5576 
5577     __get_user(host_ver->name_len, &target_ver->name_len);
5578     if (host_ver->name_len) {
5579         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5580                                    target_ver->name_len, 0);
5581         if (!host_ver->name) {
5582             return -EFAULT;
5583         }
5584     }
5585 
5586     __get_user(host_ver->date_len, &target_ver->date_len);
5587     if (host_ver->date_len) {
5588         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5589                                    target_ver->date_len, 0);
5590         if (!host_ver->date) {
5591             goto err;
5592         }
5593     }
5594 
5595     __get_user(host_ver->desc_len, &target_ver->desc_len);
5596     if (host_ver->desc_len) {
5597         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5598                                    target_ver->desc_len, 0);
5599         if (!host_ver->desc) {
5600             goto err;
5601         }
5602     }
5603 
5604     return 0;
5605 err:
5606     unlock_drm_version(host_ver, target_ver, false);
5607     return -EFAULT;
5608 }
5609 
5610 static inline void host_to_target_drmversion(
5611                                           struct target_drm_version *target_ver,
5612                                           struct drm_version *host_ver)
5613 {
5614     __put_user(host_ver->version_major, &target_ver->version_major);
5615     __put_user(host_ver->version_minor, &target_ver->version_minor);
5616     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5617     __put_user(host_ver->name_len, &target_ver->name_len);
5618     __put_user(host_ver->date_len, &target_ver->date_len);
5619     __put_user(host_ver->desc_len, &target_ver->desc_len);
5620     unlock_drm_version(host_ver, target_ver, true);
5621 }
5622 
5623 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5624                              int fd, int cmd, abi_long arg)
5625 {
5626     struct drm_version *ver;
5627     struct target_drm_version *target_ver;
5628     abi_long ret;
5629 
5630     switch (ie->host_cmd) {
5631     case DRM_IOCTL_VERSION:
5632         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5633             return -TARGET_EFAULT;
5634         }
5635         ver = (struct drm_version *)buf_temp;
5636         ret = target_to_host_drmversion(ver, target_ver);
5637         if (!is_error(ret)) {
5638             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5639             if (is_error(ret)) {
5640                 unlock_drm_version(ver, target_ver, false);
5641             } else {
5642                 host_to_target_drmversion(target_ver, ver);
5643             }
5644         }
5645         unlock_user_struct(target_ver, arg, 0);
5646         return ret;
5647     }
5648     return -TARGET_ENOSYS;
5649 }
5650 
5651 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5652                                            struct drm_i915_getparam *gparam,
5653                                            int fd, abi_long arg)
5654 {
5655     abi_long ret;
5656     int value;
5657     struct target_drm_i915_getparam *target_gparam;
5658 
5659     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5660         return -TARGET_EFAULT;
5661     }
5662 
5663     __get_user(gparam->param, &target_gparam->param);
5664     gparam->value = &value;
5665     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5666     put_user_s32(value, target_gparam->value);
5667 
5668     unlock_user_struct(target_gparam, arg, 0);
5669     return ret;
5670 }
5671 
5672 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5673                                   int fd, int cmd, abi_long arg)
5674 {
5675     switch (ie->host_cmd) {
5676     case DRM_IOCTL_I915_GETPARAM:
5677         return do_ioctl_drm_i915_getparam(ie,
5678                                           (struct drm_i915_getparam *)buf_temp,
5679                                           fd, arg);
5680     default:
5681         return -TARGET_ENOSYS;
5682     }
5683 }
5684 
5685 #endif
5686 
5687 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5688                                         int fd, int cmd, abi_long arg)
5689 {
5690     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5691     struct tun_filter *target_filter;
5692     char *target_addr;
5693 
5694     assert(ie->access == IOC_W);
5695 
5696     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5697     if (!target_filter) {
5698         return -TARGET_EFAULT;
5699     }
5700     filter->flags = tswap16(target_filter->flags);
5701     filter->count = tswap16(target_filter->count);
5702     unlock_user(target_filter, arg, 0);
5703 
5704     if (filter->count) {
5705         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5706             MAX_STRUCT_SIZE) {
5707             return -TARGET_EFAULT;
5708         }
5709 
5710         target_addr = lock_user(VERIFY_READ,
5711                                 arg + offsetof(struct tun_filter, addr),
5712                                 filter->count * ETH_ALEN, 1);
5713         if (!target_addr) {
5714             return -TARGET_EFAULT;
5715         }
5716         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5717         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5718     }
5719 
5720     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5721 }
5722 
5723 IOCTLEntry ioctl_entries[] = {
5724 #define IOCTL(cmd, access, ...) \
5725     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5726 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5727     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5728 #define IOCTL_IGNORE(cmd) \
5729     { TARGET_ ## cmd, 0, #cmd },
5730 #include "ioctls.h"
5731     { 0, 0, },
5732 };
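/*
 * Given the macros above, a hypothetical
 *     IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
 * line in ioctls.h expands to
 *     { TARGET_BLKROSET, BLKROSET, "BLKROSET", IOC_W, 0, { MK_PTR(TYPE_INT) } },
 * while IOCTL_IGNORE(cmd) leaves host_cmd zero so that do_ioctl() rejects the
 * request with -TARGET_ENOTTY.
 */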
5733 
5734 /* ??? Implement proper locking for ioctls.  */
5735 /* do_ioctl() Must return target values and target errnos. */
5736 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5737 {
5738     const IOCTLEntry *ie;
5739     const argtype *arg_type;
5740     abi_long ret;
5741     uint8_t buf_temp[MAX_STRUCT_SIZE];
5742     int target_size;
5743     void *argptr;
5744 
5745     ie = ioctl_entries;
5746     for(;;) {
5747         if (ie->target_cmd == 0) {
5748             qemu_log_mask(
5749                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5750             return -TARGET_ENOTTY;
5751         }
5752         if (ie->target_cmd == cmd)
5753             break;
5754         ie++;
5755     }
5756     arg_type = ie->arg_type;
5757     if (ie->do_ioctl) {
5758         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5759     } else if (!ie->host_cmd) {
5760         /* Some architectures define BSD ioctls in their headers
5761            that are not implemented in Linux.  */
5762         return -TARGET_ENOTTY;
5763     }
5764 
5765     switch(arg_type[0]) {
5766     case TYPE_NULL:
5767         /* no argument */
5768         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5769         break;
5770     case TYPE_PTRVOID:
5771     case TYPE_INT:
5772     case TYPE_LONG:
5773     case TYPE_ULONG:
5774         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5775         break;
5776     case TYPE_PTR:
5777         arg_type++;
5778         target_size = thunk_type_size(arg_type, 0);
5779         switch(ie->access) {
5780         case IOC_R:
5781             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5782             if (!is_error(ret)) {
5783                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5784                 if (!argptr)
5785                     return -TARGET_EFAULT;
5786                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5787                 unlock_user(argptr, arg, target_size);
5788             }
5789             break;
5790         case IOC_W:
5791             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5792             if (!argptr)
5793                 return -TARGET_EFAULT;
5794             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5795             unlock_user(argptr, arg, 0);
5796             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5797             break;
5798         default:
5799         case IOC_RW:
5800             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5801             if (!argptr)
5802                 return -TARGET_EFAULT;
5803             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5804             unlock_user(argptr, arg, 0);
5805             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5806             if (!is_error(ret)) {
5807                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5808                 if (!argptr)
5809                     return -TARGET_EFAULT;
5810                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5811                 unlock_user(argptr, arg, target_size);
5812             }
5813             break;
5814         }
5815         break;
5816     default:
5817         qemu_log_mask(LOG_UNIMP,
5818                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5819                       (long)cmd, arg_type[0]);
5820         ret = -TARGET_ENOTTY;
5821         break;
5822     }
5823     return ret;
5824 }
5825 
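/*
 * Each row in the bitmask_transtbl tables below is
 * { target_mask, target_bits, host_mask, host_bits }.  Roughly, the
 * translation helpers OR host_bits into the result when
 * (value & target_mask) == target_bits, and symmetrically for
 * host_to_target_bitmask().  Single-bit flags repeat the same constant in
 * both slots; multi-bit fields such as NLDLY or CSIZE get one row per value.
 */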
5826 static const bitmask_transtbl iflag_tbl[] = {
5827         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5828         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5829         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5830         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5831         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5832         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5833         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5834         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5835         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5836         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5837         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5838         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5839         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5840         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5841         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5842         { 0, 0, 0, 0 }
5843 };
5844 
5845 static const bitmask_transtbl oflag_tbl[] = {
5846 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5847 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5848 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5849 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5850 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5851 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5852 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5853 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5854 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5855 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5856 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5857 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5858 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5859 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5860 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5861 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5862 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5863 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5864 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5865 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5866 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5867 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5868 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5869 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5870 	{ 0, 0, 0, 0 }
5871 };
5872 
5873 static const bitmask_transtbl cflag_tbl[] = {
5874 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5875 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5876 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5877 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5878 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5879 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5880 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5881 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5882 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5883 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5884 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5885 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5886 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5887 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5888 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5889 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5890 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5891 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5892 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5893 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5894 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5895 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5896 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5897 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5898 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5899 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5900 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5901 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5902 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5903 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5904 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5905 	{ 0, 0, 0, 0 }
5906 };
5907 
5908 static const bitmask_transtbl lflag_tbl[] = {
5909   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5910   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5911   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5912   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5913   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5914   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5915   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5916   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5917   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5918   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5919   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5920   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5921   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5922   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5923   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5924   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5925   { 0, 0, 0, 0 }
5926 };
5927 
5928 static void target_to_host_termios (void *dst, const void *src)
5929 {
5930     struct host_termios *host = dst;
5931     const struct target_termios *target = src;
5932 
5933     host->c_iflag =
5934         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5935     host->c_oflag =
5936         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5937     host->c_cflag =
5938         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5939     host->c_lflag =
5940         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5941     host->c_line = target->c_line;
5942 
5943     memset(host->c_cc, 0, sizeof(host->c_cc));
5944     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5945     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5946     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5947     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5948     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5949     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5950     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5951     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5952     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5953     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5954     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5955     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5956     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5957     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5958     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5959     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5960     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5961 }
5962 
5963 static void host_to_target_termios (void *dst, const void *src)
5964 {
5965     struct target_termios *target = dst;
5966     const struct host_termios *host = src;
5967 
5968     target->c_iflag =
5969         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5970     target->c_oflag =
5971         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5972     target->c_cflag =
5973         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5974     target->c_lflag =
5975         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5976     target->c_line = host->c_line;
5977 
5978     memset(target->c_cc, 0, sizeof(target->c_cc));
5979     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5980     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5981     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5982     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5983     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5984     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5985     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5986     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5987     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5988     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5989     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5990     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5991     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5992     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5993     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5994     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5995     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5996 }
5997 
5998 static const StructEntry struct_termios_def = {
5999     .convert = { host_to_target_termios, target_to_host_termios },
6000     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6001     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6002     .print = print_termios,
6003 };
6004 
6005 static const bitmask_transtbl mmap_flags_tbl[] = {
6006     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6007     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6008     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6009     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6010       MAP_ANONYMOUS, MAP_ANONYMOUS },
6011     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6012       MAP_GROWSDOWN, MAP_GROWSDOWN },
6013     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6014       MAP_DENYWRITE, MAP_DENYWRITE },
6015     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6016       MAP_EXECUTABLE, MAP_EXECUTABLE },
6017     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6018     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6019       MAP_NORESERVE, MAP_NORESERVE },
6020     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6021     /* MAP_STACK had been ignored by the kernel for quite some time.
6022        Recognize it for the target insofar as we do not want to pass
6023        it through to the host.  */
6024     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6025     { 0, 0, 0, 0 }
6026 };
6027 
6028 /*
6029  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6030  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6031  */
6032 #if defined(TARGET_I386)
6033 
6034 /* NOTE: there is really one LDT for all the threads */
6035 static uint8_t *ldt_table;
6036 
6037 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6038 {
6039     int size;
6040     void *p;
6041 
6042     if (!ldt_table)
6043         return 0;
6044     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6045     if (size > bytecount)
6046         size = bytecount;
6047     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6048     if (!p)
6049         return -TARGET_EFAULT;
6050     /* ??? Should this be byteswapped?  */
6051     memcpy(p, ldt_table, size);
6052     unlock_user(p, ptr, size);
6053     return size;
6054 }
6055 
6056 /* XXX: add locking support */
6057 static abi_long write_ldt(CPUX86State *env,
6058                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6059 {
6060     struct target_modify_ldt_ldt_s ldt_info;
6061     struct target_modify_ldt_ldt_s *target_ldt_info;
6062     int seg_32bit, contents, read_exec_only, limit_in_pages;
6063     int seg_not_present, useable, lm;
6064     uint32_t *lp, entry_1, entry_2;
6065 
6066     if (bytecount != sizeof(ldt_info))
6067         return -TARGET_EINVAL;
6068     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6069         return -TARGET_EFAULT;
6070     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6071     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6072     ldt_info.limit = tswap32(target_ldt_info->limit);
6073     ldt_info.flags = tswap32(target_ldt_info->flags);
6074     unlock_user_struct(target_ldt_info, ptr, 0);
6075 
6076     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6077         return -TARGET_EINVAL;
6078     seg_32bit = ldt_info.flags & 1;
6079     contents = (ldt_info.flags >> 1) & 3;
6080     read_exec_only = (ldt_info.flags >> 3) & 1;
6081     limit_in_pages = (ldt_info.flags >> 4) & 1;
6082     seg_not_present = (ldt_info.flags >> 5) & 1;
6083     useable = (ldt_info.flags >> 6) & 1;
6084 #ifdef TARGET_ABI32
6085     lm = 0;
6086 #else
6087     lm = (ldt_info.flags >> 7) & 1;
6088 #endif
6089     if (contents == 3) {
6090         if (oldmode)
6091             return -TARGET_EINVAL;
6092         if (seg_not_present == 0)
6093             return -TARGET_EINVAL;
6094     }
6095     /* allocate the LDT */
6096     if (!ldt_table) {
6097         env->ldt.base = target_mmap(0,
6098                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6099                                     PROT_READ|PROT_WRITE,
6100                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6101         if (env->ldt.base == -1)
6102             return -TARGET_ENOMEM;
6103         memset(g2h_untagged(env->ldt.base), 0,
6104                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6105         env->ldt.limit = 0xffff;
6106         ldt_table = g2h_untagged(env->ldt.base);
6107     }
6108 
6109     /* NOTE: same code as Linux kernel */
6110     /* Allow LDTs to be cleared by the user. */
6111     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6112         if (oldmode ||
6113             (contents == 0		&&
6114              read_exec_only == 1	&&
6115              seg_32bit == 0		&&
6116              limit_in_pages == 0	&&
6117              seg_not_present == 1	&&
6118              useable == 0 )) {
6119             entry_1 = 0;
6120             entry_2 = 0;
6121             goto install;
6122         }
6123     }
6124 
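    /*
     * entry_1/entry_2 are the low and high 32 bits of an x86 segment
     * descriptor: the base address is scattered across both words, the
     * 20-bit limit is split 16/4, and the type/present/DB/granularity/AVL/L
     * flag bits land in the high word, matching the layout the hardware
     * expects for LDT entries.
     */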
6125     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6126         (ldt_info.limit & 0x0ffff);
6127     entry_2 = (ldt_info.base_addr & 0xff000000) |
6128         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6129         (ldt_info.limit & 0xf0000) |
6130         ((read_exec_only ^ 1) << 9) |
6131         (contents << 10) |
6132         ((seg_not_present ^ 1) << 15) |
6133         (seg_32bit << 22) |
6134         (limit_in_pages << 23) |
6135         (lm << 21) |
6136         0x7000;
6137     if (!oldmode)
6138         entry_2 |= (useable << 20);
6139 
6140     /* Install the new entry ...  */
6141 install:
6142     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6143     lp[0] = tswap32(entry_1);
6144     lp[1] = tswap32(entry_2);
6145     return 0;
6146 }
6147 
6148 /* specific and weird i386 syscalls */
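/*
 * The func dispatch below mirrors the kernel's sys_modify_ldt(): func 0
 * reads the LDT, func 1 writes an entry in the legacy format and func 0x11
 * in the current format; anything else (including func 2, read_default_ldt,
 * which is not implemented here) returns -TARGET_ENOSYS.
 */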
6149 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6150                               unsigned long bytecount)
6151 {
6152     abi_long ret;
6153 
6154     switch (func) {
6155     case 0:
6156         ret = read_ldt(ptr, bytecount);
6157         break;
6158     case 1:
6159         ret = write_ldt(env, ptr, bytecount, 1);
6160         break;
6161     case 0x11:
6162         ret = write_ldt(env, ptr, bytecount, 0);
6163         break;
6164     default:
6165         ret = -TARGET_ENOSYS;
6166         break;
6167     }
6168     return ret;
6169 }
6170 
6171 #if defined(TARGET_ABI32)
6172 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6173 {
6174     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6175     struct target_modify_ldt_ldt_s ldt_info;
6176     struct target_modify_ldt_ldt_s *target_ldt_info;
6177     int seg_32bit, contents, read_exec_only, limit_in_pages;
6178     int seg_not_present, useable, lm;
6179     uint32_t *lp, entry_1, entry_2;
6180     int i;
6181 
6182     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6183     if (!target_ldt_info)
6184         return -TARGET_EFAULT;
6185     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6186     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6187     ldt_info.limit = tswap32(target_ldt_info->limit);
6188     ldt_info.flags = tswap32(target_ldt_info->flags);
6189     if (ldt_info.entry_number == -1) {
6190         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6191             if (gdt_table[i] == 0) {
6192                 ldt_info.entry_number = i;
6193                 target_ldt_info->entry_number = tswap32(i);
6194                 break;
6195             }
6196         }
6197     }
6198     unlock_user_struct(target_ldt_info, ptr, 1);
6199 
6200     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6201         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6202         return -TARGET_EINVAL;
6203     seg_32bit = ldt_info.flags & 1;
6204     contents = (ldt_info.flags >> 1) & 3;
6205     read_exec_only = (ldt_info.flags >> 3) & 1;
6206     limit_in_pages = (ldt_info.flags >> 4) & 1;
6207     seg_not_present = (ldt_info.flags >> 5) & 1;
6208     useable = (ldt_info.flags >> 6) & 1;
6209 #ifdef TARGET_ABI32
6210     lm = 0;
6211 #else
6212     lm = (ldt_info.flags >> 7) & 1;
6213 #endif
6214 
6215     if (contents == 3) {
6216         if (seg_not_present == 0)
6217             return -TARGET_EINVAL;
6218     }
6219 
6220     /* NOTE: same code as Linux kernel */
6221     /* Allow LDTs to be cleared by the user. */
6222     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6223         if ((contents == 0             &&
6224              read_exec_only == 1       &&
6225              seg_32bit == 0            &&
6226              limit_in_pages == 0       &&
6227              seg_not_present == 1      &&
6228              useable == 0 )) {
6229             entry_1 = 0;
6230             entry_2 = 0;
6231             goto install;
6232         }
6233     }
6234 
6235     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6236         (ldt_info.limit & 0x0ffff);
6237     entry_2 = (ldt_info.base_addr & 0xff000000) |
6238         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6239         (ldt_info.limit & 0xf0000) |
6240         ((read_exec_only ^ 1) << 9) |
6241         (contents << 10) |
6242         ((seg_not_present ^ 1) << 15) |
6243         (seg_32bit << 22) |
6244         (limit_in_pages << 23) |
6245         (useable << 20) |
6246         (lm << 21) |
6247         0x7000;
6248 
6249     /* Install the new entry ...  */
6250 install:
6251     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6252     lp[0] = tswap32(entry_1);
6253     lp[1] = tswap32(entry_2);
6254     return 0;
6255 }
6256 
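/*
 * Inverse of do_set_thread_area(): decode the requested GDT TLS descriptor
 * back into the modify_ldt-style base/limit/flags layout that the guest's
 * struct user_desc expects.
 */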
6257 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6258 {
6259     struct target_modify_ldt_ldt_s *target_ldt_info;
6260     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6261     uint32_t base_addr, limit, flags;
6262     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6263     int seg_not_present, useable, lm;
6264     uint32_t *lp, entry_1, entry_2;
6265 
6266     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6267     if (!target_ldt_info)
6268         return -TARGET_EFAULT;
6269     idx = tswap32(target_ldt_info->entry_number);
6270     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6271         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6272         unlock_user_struct(target_ldt_info, ptr, 1);
6273         return -TARGET_EINVAL;
6274     }
6275     lp = (uint32_t *)(gdt_table + idx);
6276     entry_1 = tswap32(lp[0]);
6277     entry_2 = tswap32(lp[1]);
6278 
6279     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6280     contents = (entry_2 >> 10) & 3;
6281     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6282     seg_32bit = (entry_2 >> 22) & 1;
6283     limit_in_pages = (entry_2 >> 23) & 1;
6284     useable = (entry_2 >> 20) & 1;
6285 #ifdef TARGET_ABI32
6286     lm = 0;
6287 #else
6288     lm = (entry_2 >> 21) & 1;
6289 #endif
6290     flags = (seg_32bit << 0) | (contents << 1) |
6291         (read_exec_only << 3) | (limit_in_pages << 4) |
6292         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6293     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6294     base_addr = (entry_1 >> 16) |
6295         (entry_2 & 0xff000000) |
6296         ((entry_2 & 0xff) << 16);
6297     target_ldt_info->base_addr = tswapal(base_addr);
6298     target_ldt_info->limit = tswap32(limit);
6299     target_ldt_info->flags = tswap32(flags);
6300     unlock_user_struct(target_ldt_info, ptr, 1);
6301     return 0;
6302 }
6303 
6304 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6305 {
6306     return -TARGET_ENOSYS;
6307 }
6308 #else
6309 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6310 {
6311     abi_long ret = 0;
6312     abi_ulong val;
6313     int idx;
6314 
6315     switch(code) {
6316     case TARGET_ARCH_SET_GS:
6317     case TARGET_ARCH_SET_FS:
6318         if (code == TARGET_ARCH_SET_GS)
6319             idx = R_GS;
6320         else
6321             idx = R_FS;
6322         cpu_x86_load_seg(env, idx, 0);
6323         env->segs[idx].base = addr;
6324         break;
6325     case TARGET_ARCH_GET_GS:
6326     case TARGET_ARCH_GET_FS:
6327         if (code == TARGET_ARCH_GET_GS)
6328             idx = R_GS;
6329         else
6330             idx = R_FS;
6331         val = env->segs[idx].base;
6332         if (put_user(val, addr, abi_ulong))
6333             ret = -TARGET_EFAULT;
6334         break;
6335     default:
6336         ret = -TARGET_EINVAL;
6337         break;
6338     }
6339     return ret;
6340 }
6341 #endif /* defined(TARGET_ABI32) */
6342 #endif /* defined(TARGET_I386) */
6343 
6344 /*
6345  * These constants are generic.  Supply any that are missing from the host.
6346  */
6347 #ifndef PR_SET_NAME
6348 # define PR_SET_NAME    15
6349 # define PR_GET_NAME    16
6350 #endif
6351 #ifndef PR_SET_FP_MODE
6352 # define PR_SET_FP_MODE 45
6353 # define PR_GET_FP_MODE 46
6354 # define PR_FP_MODE_FR   (1 << 0)
6355 # define PR_FP_MODE_FRE  (1 << 1)
6356 #endif
6357 #ifndef PR_SVE_SET_VL
6358 # define PR_SVE_SET_VL  50
6359 # define PR_SVE_GET_VL  51
6360 # define PR_SVE_VL_LEN_MASK  0xffff
6361 # define PR_SVE_VL_INHERIT   (1 << 17)
6362 #endif
6363 #ifndef PR_PAC_RESET_KEYS
6364 # define PR_PAC_RESET_KEYS  54
6365 # define PR_PAC_APIAKEY   (1 << 0)
6366 # define PR_PAC_APIBKEY   (1 << 1)
6367 # define PR_PAC_APDAKEY   (1 << 2)
6368 # define PR_PAC_APDBKEY   (1 << 3)
6369 # define PR_PAC_APGAKEY   (1 << 4)
6370 #endif
6371 #ifndef PR_SET_TAGGED_ADDR_CTRL
6372 # define PR_SET_TAGGED_ADDR_CTRL 55
6373 # define PR_GET_TAGGED_ADDR_CTRL 56
6374 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6375 #endif
6376 #ifndef PR_MTE_TCF_SHIFT
6377 # define PR_MTE_TCF_SHIFT       1
6378 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6379 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6380 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6381 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6382 # define PR_MTE_TAG_SHIFT       3
6383 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6384 #endif
6385 #ifndef PR_SET_IO_FLUSHER
6386 # define PR_SET_IO_FLUSHER 57
6387 # define PR_GET_IO_FLUSHER 58
6388 #endif
6389 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6390 # define PR_SET_SYSCALL_USER_DISPATCH 59
6391 #endif
6392 #ifndef PR_SME_SET_VL
6393 # define PR_SME_SET_VL  63
6394 # define PR_SME_GET_VL  64
6395 # define PR_SME_VL_LEN_MASK  0xffff
6396 # define PR_SME_VL_INHERIT   (1 << 17)
6397 #endif
6398 
6399 #include "target_prctl.h"
6400 
6401 static abi_long do_prctl_inval0(CPUArchState *env)
6402 {
6403     return -TARGET_EINVAL;
6404 }
6405 
6406 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6407 {
6408     return -TARGET_EINVAL;
6409 }
6410 
6411 #ifndef do_prctl_get_fp_mode
6412 #define do_prctl_get_fp_mode do_prctl_inval0
6413 #endif
6414 #ifndef do_prctl_set_fp_mode
6415 #define do_prctl_set_fp_mode do_prctl_inval1
6416 #endif
6417 #ifndef do_prctl_sve_get_vl
6418 #define do_prctl_sve_get_vl do_prctl_inval0
6419 #endif
6420 #ifndef do_prctl_sve_set_vl
6421 #define do_prctl_sve_set_vl do_prctl_inval1
6422 #endif
6423 #ifndef do_prctl_reset_keys
6424 #define do_prctl_reset_keys do_prctl_inval1
6425 #endif
6426 #ifndef do_prctl_set_tagged_addr_ctrl
6427 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6428 #endif
6429 #ifndef do_prctl_get_tagged_addr_ctrl
6430 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6431 #endif
6432 #ifndef do_prctl_get_unalign
6433 #define do_prctl_get_unalign do_prctl_inval1
6434 #endif
6435 #ifndef do_prctl_set_unalign
6436 #define do_prctl_set_unalign do_prctl_inval1
6437 #endif
6438 #ifndef do_prctl_sme_get_vl
6439 #define do_prctl_sme_get_vl do_prctl_inval0
6440 #endif
6441 #ifndef do_prctl_sme_set_vl
6442 #define do_prctl_sme_set_vl do_prctl_inval1
6443 #endif
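
/*
 * Any hook not provided by the target's target_prctl.h (included above)
 * falls back to one of the do_prctl_inval* stubs and therefore returns
 * -TARGET_EINVAL.  A target opts in simply by defining the hook, along the
 * lines of (hypothetical sketch):
 *
 *   static abi_long do_prctl_get_fp_mode(CPUArchState *env) { ... }
 *   #define do_prctl_get_fp_mode do_prctl_get_fp_mode
 */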
6444 
6445 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6446                          abi_long arg3, abi_long arg4, abi_long arg5)
6447 {
6448     abi_long ret;
6449 
6450     switch (option) {
6451     case PR_GET_PDEATHSIG:
6452         {
6453             int deathsig;
6454             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6455                                   arg3, arg4, arg5));
6456             if (!is_error(ret) &&
6457                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6458                 return -TARGET_EFAULT;
6459             }
6460             return ret;
6461         }
6462     case PR_SET_PDEATHSIG:
6463         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6464                                arg3, arg4, arg5));
6465     case PR_GET_NAME:
6466         {
6467             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6468             if (!name) {
6469                 return -TARGET_EFAULT;
6470             }
6471             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6472                                   arg3, arg4, arg5));
6473             unlock_user(name, arg2, 16);
6474             return ret;
6475         }
6476     case PR_SET_NAME:
6477         {
6478             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6479             if (!name) {
6480                 return -TARGET_EFAULT;
6481             }
6482             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6483                                   arg3, arg4, arg5));
6484             unlock_user(name, arg2, 0);
6485             return ret;
6486         }
6487     case PR_GET_FP_MODE:
6488         return do_prctl_get_fp_mode(env);
6489     case PR_SET_FP_MODE:
6490         return do_prctl_set_fp_mode(env, arg2);
6491     case PR_SVE_GET_VL:
6492         return do_prctl_sve_get_vl(env);
6493     case PR_SVE_SET_VL:
6494         return do_prctl_sve_set_vl(env, arg2);
6495     case PR_SME_GET_VL:
6496         return do_prctl_sme_get_vl(env);
6497     case PR_SME_SET_VL:
6498         return do_prctl_sme_set_vl(env, arg2);
6499     case PR_PAC_RESET_KEYS:
6500         if (arg3 || arg4 || arg5) {
6501             return -TARGET_EINVAL;
6502         }
6503         return do_prctl_reset_keys(env, arg2);
6504     case PR_SET_TAGGED_ADDR_CTRL:
6505         if (arg3 || arg4 || arg5) {
6506             return -TARGET_EINVAL;
6507         }
6508         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6509     case PR_GET_TAGGED_ADDR_CTRL:
6510         if (arg2 || arg3 || arg4 || arg5) {
6511             return -TARGET_EINVAL;
6512         }
6513         return do_prctl_get_tagged_addr_ctrl(env);
6514 
6515     case PR_GET_UNALIGN:
6516         return do_prctl_get_unalign(env, arg2);
6517     case PR_SET_UNALIGN:
6518         return do_prctl_set_unalign(env, arg2);
6519 
6520     case PR_CAP_AMBIENT:
6521     case PR_CAPBSET_READ:
6522     case PR_CAPBSET_DROP:
6523     case PR_GET_DUMPABLE:
6524     case PR_SET_DUMPABLE:
6525     case PR_GET_KEEPCAPS:
6526     case PR_SET_KEEPCAPS:
6527     case PR_GET_SECUREBITS:
6528     case PR_SET_SECUREBITS:
6529     case PR_GET_TIMING:
6530     case PR_SET_TIMING:
6531     case PR_GET_TIMERSLACK:
6532     case PR_SET_TIMERSLACK:
6533     case PR_MCE_KILL:
6534     case PR_MCE_KILL_GET:
6535     case PR_GET_NO_NEW_PRIVS:
6536     case PR_SET_NO_NEW_PRIVS:
6537     case PR_GET_IO_FLUSHER:
6538     case PR_SET_IO_FLUSHER:
6539         /* These options take no pointer arguments; pass them on unchanged. */
6540         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6541 
6542     case PR_GET_CHILD_SUBREAPER:
6543     case PR_SET_CHILD_SUBREAPER:
6544     case PR_GET_SPECULATION_CTRL:
6545     case PR_SET_SPECULATION_CTRL:
6546     case PR_GET_TID_ADDRESS:
6547         /* TODO */
6548         return -TARGET_EINVAL;
6549 
6550     case PR_GET_FPEXC:
6551     case PR_SET_FPEXC:
6552         /* Was used for SPE on PowerPC. */
6553         return -TARGET_EINVAL;
6554 
6555     case PR_GET_ENDIAN:
6556     case PR_SET_ENDIAN:
6557     case PR_GET_FPEMU:
6558     case PR_SET_FPEMU:
6559     case PR_SET_MM:
6560     case PR_GET_SECCOMP:
6561     case PR_SET_SECCOMP:
6562     case PR_SET_SYSCALL_USER_DISPATCH:
6563     case PR_GET_THP_DISABLE:
6564     case PR_SET_THP_DISABLE:
6565     case PR_GET_TSC:
6566     case PR_SET_TSC:
6567         /* Refuse these to prevent the guest disabling things we rely on. */
6568         return -TARGET_EINVAL;
6569 
6570     default:
6571         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6572                       option);
6573         return -TARGET_EINVAL;
6574     }
6575 }
6576 
6577 #define NEW_STACK_SIZE 0x40000
6578 
6579 
6580 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6581 typedef struct {
6582     CPUArchState *env;
6583     pthread_mutex_t mutex;
6584     pthread_cond_t cond;
6585     pthread_t thread;
6586     uint32_t tid;
6587     abi_ulong child_tidptr;
6588     abi_ulong parent_tidptr;
6589     sigset_t sigmask;
6590 } new_thread_info;
6591 
6592 static void *clone_func(void *arg)
6593 {
6594     new_thread_info *info = arg;
6595     CPUArchState *env;
6596     CPUState *cpu;
6597     TaskState *ts;
6598 
6599     rcu_register_thread();
6600     tcg_register_thread();
6601     env = info->env;
6602     cpu = env_cpu(env);
6603     thread_cpu = cpu;
6604     ts = (TaskState *)cpu->opaque;
6605     info->tid = sys_gettid();
6606     task_settid(ts);
6607     if (info->child_tidptr)
6608         put_user_u32(info->tid, info->child_tidptr);
6609     if (info->parent_tidptr)
6610         put_user_u32(info->tid, info->parent_tidptr);
6611     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6612     /* Enable signals.  */
6613     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6614     /* Signal to the parent that we're ready.  */
6615     pthread_mutex_lock(&info->mutex);
6616     pthread_cond_broadcast(&info->cond);
6617     pthread_mutex_unlock(&info->mutex);
6618     /* Wait until the parent has finished initializing the tls state.  */
6619     pthread_mutex_lock(&clone_lock);
6620     pthread_mutex_unlock(&clone_lock);
6621     cpu_loop(env);
6622     /* never exits */
6623     return NULL;
6624 }
6625 
6626 /* do_fork() must return host values and target errnos (unlike most
6627    do_*() functions). */
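/*
 * For CLONE_VM the new guest thread is backed by a host pthread: the parent
 * holds clone_lock while it copies the CPU state, the child publishes its
 * TID and signals info.cond, then blocks on clone_lock so it only starts
 * executing guest code once the parent has finished the remaining setup.
 * Without CLONE_VM we fall back to a host fork().
 */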
6628 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6629                    abi_ulong parent_tidptr, target_ulong newtls,
6630                    abi_ulong child_tidptr)
6631 {
6632     CPUState *cpu = env_cpu(env);
6633     int ret;
6634     TaskState *ts;
6635     CPUState *new_cpu;
6636     CPUArchState *new_env;
6637     sigset_t sigmask;
6638 
6639     flags &= ~CLONE_IGNORED_FLAGS;
6640 
6641     /* Emulate vfork() with fork() */
6642     if (flags & CLONE_VFORK)
6643         flags &= ~(CLONE_VFORK | CLONE_VM);
6644 
6645     if (flags & CLONE_VM) {
6646         TaskState *parent_ts = (TaskState *)cpu->opaque;
6647         new_thread_info info;
6648         pthread_attr_t attr;
6649 
6650         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6651             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6652             return -TARGET_EINVAL;
6653         }
6654 
6655         ts = g_new0(TaskState, 1);
6656         init_task_state(ts);
6657 
6658         /* Grab a mutex so that thread setup appears atomic.  */
6659         pthread_mutex_lock(&clone_lock);
6660 
6661         /*
6662          * If this is our first additional thread, we need to ensure we
6663          * generate code for parallel execution and flush old translations.
6664          * Do this now so that the copy gets CF_PARALLEL too.
6665          */
6666         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6667             cpu->tcg_cflags |= CF_PARALLEL;
6668             tb_flush(cpu);
6669         }
6670 
6671         /* we create a new CPU instance. */
6672         new_env = cpu_copy(env);
6673         /* Init regs that differ from the parent.  */
6674         cpu_clone_regs_child(new_env, newsp, flags);
6675         cpu_clone_regs_parent(env, flags);
6676         new_cpu = env_cpu(new_env);
6677         new_cpu->opaque = ts;
6678         ts->bprm = parent_ts->bprm;
6679         ts->info = parent_ts->info;
6680         ts->signal_mask = parent_ts->signal_mask;
6681 
6682         if (flags & CLONE_CHILD_CLEARTID) {
6683             ts->child_tidptr = child_tidptr;
6684         }
6685 
6686         if (flags & CLONE_SETTLS) {
6687             cpu_set_tls (new_env, newtls);
6688         }
6689 
6690         memset(&info, 0, sizeof(info));
6691         pthread_mutex_init(&info.mutex, NULL);
6692         pthread_mutex_lock(&info.mutex);
6693         pthread_cond_init(&info.cond, NULL);
6694         info.env = new_env;
6695         if (flags & CLONE_CHILD_SETTID) {
6696             info.child_tidptr = child_tidptr;
6697         }
6698         if (flags & CLONE_PARENT_SETTID) {
6699             info.parent_tidptr = parent_tidptr;
6700         }
6701 
6702         ret = pthread_attr_init(&attr);
6703         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6704         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6705         /* It is not safe to deliver signals until the child has finished
6706            initializing, so temporarily block all signals.  */
6707         sigfillset(&sigmask);
6708         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6709         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6710 
6711         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6712         /* TODO: Free new CPU state if thread creation failed.  */
6713 
6714         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6715         pthread_attr_destroy(&attr);
6716         if (ret == 0) {
6717             /* Wait for the child to initialize.  */
6718             pthread_cond_wait(&info.cond, &info.mutex);
6719             ret = info.tid;
6720         } else {
6721             ret = -1;
6722         }
6723         pthread_mutex_unlock(&info.mutex);
6724         pthread_cond_destroy(&info.cond);
6725         pthread_mutex_destroy(&info.mutex);
6726         pthread_mutex_unlock(&clone_lock);
6727     } else {
6728         /* if no CLONE_VM, we consider it is a fork */
6729         if (flags & CLONE_INVALID_FORK_FLAGS) {
6730             return -TARGET_EINVAL;
6731         }
6732 
6733         /* We can't support custom termination signals */
6734         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6735             return -TARGET_EINVAL;
6736         }
6737 
6738 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6739         if (flags & CLONE_PIDFD) {
6740             return -TARGET_EINVAL;
6741         }
6742 #endif
6743 
6744         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6745         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6746             return -TARGET_EINVAL;
6747         }
6748 
6749         if (block_signals()) {
6750             return -QEMU_ERESTARTSYS;
6751         }
6752 
6753         fork_start();
6754         ret = fork();
6755         if (ret == 0) {
6756             /* Child Process.  */
6757             cpu_clone_regs_child(env, newsp, flags);
6758             fork_end(1);
6759             /* There is a race condition here.  The parent process could
6760                theoretically read the TID in the child process before the child
6761                tid is set.  This would require using either ptrace
6762                (not implemented) or having *_tidptr to point at a shared memory
6763                mapping.  We can't repeat the spinlock hack used above because
6764                the child process gets its own copy of the lock.  */
6765             if (flags & CLONE_CHILD_SETTID)
6766                 put_user_u32(sys_gettid(), child_tidptr);
6767             if (flags & CLONE_PARENT_SETTID)
6768                 put_user_u32(sys_gettid(), parent_tidptr);
6769             ts = (TaskState *)cpu->opaque;
6770             if (flags & CLONE_SETTLS)
6771                 cpu_set_tls (env, newtls);
6772             if (flags & CLONE_CHILD_CLEARTID)
6773                 ts->child_tidptr = child_tidptr;
6774         } else {
6775             cpu_clone_regs_parent(env, flags);
6776             if (flags & CLONE_PIDFD) {
6777                 int pid_fd = 0;
6778 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6779                 int pid_child = ret;
6780                 pid_fd = pidfd_open(pid_child, 0);
6781                 if (pid_fd >= 0) {
6782                     fcntl(pid_fd, F_SETFD,
6783                           fcntl(pid_fd, F_GETFD) | FD_CLOEXEC);
6784                 } else {
6785                     pid_fd = 0;
6786                 }
6787 #endif
6788                 put_user_u32(pid_fd, parent_tidptr);
6789             }
6790             fork_end(0);
6791         }
6792         g_assert(!cpu_in_exclusive_context(cpu));
6793     }
6794     return ret;
6795 }
6796 
6797 /* Warning: doesn't handle Linux-specific flags... */
6798 static int target_to_host_fcntl_cmd(int cmd)
6799 {
6800     int ret;
6801 
6802     switch(cmd) {
6803     case TARGET_F_DUPFD:
6804     case TARGET_F_GETFD:
6805     case TARGET_F_SETFD:
6806     case TARGET_F_GETFL:
6807     case TARGET_F_SETFL:
6808     case TARGET_F_OFD_GETLK:
6809     case TARGET_F_OFD_SETLK:
6810     case TARGET_F_OFD_SETLKW:
6811         ret = cmd;
6812         break;
6813     case TARGET_F_GETLK:
6814         ret = F_GETLK64;
6815         break;
6816     case TARGET_F_SETLK:
6817         ret = F_SETLK64;
6818         break;
6819     case TARGET_F_SETLKW:
6820         ret = F_SETLKW64;
6821         break;
6822     case TARGET_F_GETOWN:
6823         ret = F_GETOWN;
6824         break;
6825     case TARGET_F_SETOWN:
6826         ret = F_SETOWN;
6827         break;
6828     case TARGET_F_GETSIG:
6829         ret = F_GETSIG;
6830         break;
6831     case TARGET_F_SETSIG:
6832         ret = F_SETSIG;
6833         break;
6834 #if TARGET_ABI_BITS == 32
6835     case TARGET_F_GETLK64:
6836         ret = F_GETLK64;
6837         break;
6838     case TARGET_F_SETLK64:
6839         ret = F_SETLK64;
6840         break;
6841     case TARGET_F_SETLKW64:
6842         ret = F_SETLKW64;
6843         break;
6844 #endif
6845     case TARGET_F_SETLEASE:
6846         ret = F_SETLEASE;
6847         break;
6848     case TARGET_F_GETLEASE:
6849         ret = F_GETLEASE;
6850         break;
6851 #ifdef F_DUPFD_CLOEXEC
6852     case TARGET_F_DUPFD_CLOEXEC:
6853         ret = F_DUPFD_CLOEXEC;
6854         break;
6855 #endif
6856     case TARGET_F_NOTIFY:
6857         ret = F_NOTIFY;
6858         break;
6859 #ifdef F_GETOWN_EX
6860     case TARGET_F_GETOWN_EX:
6861         ret = F_GETOWN_EX;
6862         break;
6863 #endif
6864 #ifdef F_SETOWN_EX
6865     case TARGET_F_SETOWN_EX:
6866         ret = F_SETOWN_EX;
6867         break;
6868 #endif
6869 #ifdef F_SETPIPE_SZ
6870     case TARGET_F_SETPIPE_SZ:
6871         ret = F_SETPIPE_SZ;
6872         break;
6873     case TARGET_F_GETPIPE_SZ:
6874         ret = F_GETPIPE_SZ;
6875         break;
6876 #endif
6877 #ifdef F_ADD_SEALS
6878     case TARGET_F_ADD_SEALS:
6879         ret = F_ADD_SEALS;
6880         break;
6881     case TARGET_F_GET_SEALS:
6882         ret = F_GET_SEALS;
6883         break;
6884 #endif
6885     default:
6886         ret = -TARGET_EINVAL;
6887         break;
6888     }
6889 
6890 #if defined(__powerpc64__)
6891     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6892      * the kernel does not support. The glibc fcntl call actually adjusts
6893      * them to 5, 6 and 7 before making the syscall(). Since we make the
6894      * syscall directly, adjust to what is supported by the kernel.
6895      */
6896     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6897         ret -= F_GETLK64 - 5;
6898     }
6899 #endif
6900 
6901     return ret;
6902 }
6903 
6904 #define FLOCK_TRANSTBL \
6905     switch (type) { \
6906     TRANSTBL_CONVERT(F_RDLCK); \
6907     TRANSTBL_CONVERT(F_WRLCK); \
6908     TRANSTBL_CONVERT(F_UNLCK); \
6909     }
6910 
6911 static int target_to_host_flock(int type)
6912 {
6913 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6914     FLOCK_TRANSTBL
6915 #undef  TRANSTBL_CONVERT
6916     return -TARGET_EINVAL;
6917 }
6918 
6919 static int host_to_target_flock(int type)
6920 {
6921 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6922     FLOCK_TRANSTBL
6923 #undef  TRANSTBL_CONVERT
6924     /* if we don't know how to convert the value coming
6925      * from the host we copy to the target field as-is
6926      */
6927     return type;
6928 }
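
/*
 * FLOCK_TRANSTBL is an X-macro: each helper above redefines TRANSTBL_CONVERT
 * before expanding it, so target_to_host_flock() effectively becomes
 *   switch (type) { case TARGET_F_RDLCK: return F_RDLCK; ... }
 * and host_to_target_flock() the reverse mapping, with unknown host values
 * passed through unchanged.
 */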
6929 
6930 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6931                                             abi_ulong target_flock_addr)
6932 {
6933     struct target_flock *target_fl;
6934     int l_type;
6935 
6936     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6937         return -TARGET_EFAULT;
6938     }
6939 
6940     __get_user(l_type, &target_fl->l_type);
6941     l_type = target_to_host_flock(l_type);
6942     if (l_type < 0) {
6943         return l_type;
6944     }
6945     fl->l_type = l_type;
6946     __get_user(fl->l_whence, &target_fl->l_whence);
6947     __get_user(fl->l_start, &target_fl->l_start);
6948     __get_user(fl->l_len, &target_fl->l_len);
6949     __get_user(fl->l_pid, &target_fl->l_pid);
6950     unlock_user_struct(target_fl, target_flock_addr, 0);
6951     return 0;
6952 }
6953 
6954 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6955                                           const struct flock64 *fl)
6956 {
6957     struct target_flock *target_fl;
6958     short l_type;
6959 
6960     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6961         return -TARGET_EFAULT;
6962     }
6963 
6964     l_type = host_to_target_flock(fl->l_type);
6965     __put_user(l_type, &target_fl->l_type);
6966     __put_user(fl->l_whence, &target_fl->l_whence);
6967     __put_user(fl->l_start, &target_fl->l_start);
6968     __put_user(fl->l_len, &target_fl->l_len);
6969     __put_user(fl->l_pid, &target_fl->l_pid);
6970     unlock_user_struct(target_fl, target_flock_addr, 1);
6971     return 0;
6972 }
6973 
6974 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6975 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6976 
6977 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6978 struct target_oabi_flock64 {
6979     abi_short l_type;
6980     abi_short l_whence;
6981     abi_llong l_start;
6982     abi_llong l_len;
6983     abi_int   l_pid;
6984 } QEMU_PACKED;
6985 
6986 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6987                                                    abi_ulong target_flock_addr)
6988 {
6989     struct target_oabi_flock64 *target_fl;
6990     int l_type;
6991 
6992     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6993         return -TARGET_EFAULT;
6994     }
6995 
6996     __get_user(l_type, &target_fl->l_type);
6997     l_type = target_to_host_flock(l_type);
6998     if (l_type < 0) {
6999         return l_type;
7000     }
7001     fl->l_type = l_type;
7002     __get_user(fl->l_whence, &target_fl->l_whence);
7003     __get_user(fl->l_start, &target_fl->l_start);
7004     __get_user(fl->l_len, &target_fl->l_len);
7005     __get_user(fl->l_pid, &target_fl->l_pid);
7006     unlock_user_struct(target_fl, target_flock_addr, 0);
7007     return 0;
7008 }
7009 
7010 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7011                                                  const struct flock64 *fl)
7012 {
7013     struct target_oabi_flock64 *target_fl;
7014     short l_type;
7015 
7016     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7017         return -TARGET_EFAULT;
7018     }
7019 
7020     l_type = host_to_target_flock(fl->l_type);
7021     __put_user(l_type, &target_fl->l_type);
7022     __put_user(fl->l_whence, &target_fl->l_whence);
7023     __put_user(fl->l_start, &target_fl->l_start);
7024     __put_user(fl->l_len, &target_fl->l_len);
7025     __put_user(fl->l_pid, &target_fl->l_pid);
7026     unlock_user_struct(target_fl, target_flock_addr, 1);
7027     return 0;
7028 }
7029 #endif
7030 
7031 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7032                                               abi_ulong target_flock_addr)
7033 {
7034     struct target_flock64 *target_fl;
7035     int l_type;
7036 
7037     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7038         return -TARGET_EFAULT;
7039     }
7040 
7041     __get_user(l_type, &target_fl->l_type);
7042     l_type = target_to_host_flock(l_type);
7043     if (l_type < 0) {
7044         return l_type;
7045     }
7046     fl->l_type = l_type;
7047     __get_user(fl->l_whence, &target_fl->l_whence);
7048     __get_user(fl->l_start, &target_fl->l_start);
7049     __get_user(fl->l_len, &target_fl->l_len);
7050     __get_user(fl->l_pid, &target_fl->l_pid);
7051     unlock_user_struct(target_fl, target_flock_addr, 0);
7052     return 0;
7053 }
7054 
7055 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7056                                             const struct flock64 *fl)
7057 {
7058     struct target_flock64 *target_fl;
7059     short l_type;
7060 
7061     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7062         return -TARGET_EFAULT;
7063     }
7064 
7065     l_type = host_to_target_flock(fl->l_type);
7066     __put_user(l_type, &target_fl->l_type);
7067     __put_user(fl->l_whence, &target_fl->l_whence);
7068     __put_user(fl->l_start, &target_fl->l_start);
7069     __put_user(fl->l_len, &target_fl->l_len);
7070     __put_user(fl->l_pid, &target_fl->l_pid);
7071     unlock_user_struct(target_fl, target_flock_addr, 1);
7072     return 0;
7073 }
7074 
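/*
 * Common fcntl() handler: translate the command and any flock / f_owner_ex
 * structures between target and host layouts, forward to safe_fcntl(), and
 * convert the result back for the guest.
 */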
7075 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7076 {
7077     struct flock64 fl64;
7078 #ifdef F_GETOWN_EX
7079     struct f_owner_ex fox;
7080     struct target_f_owner_ex *target_fox;
7081 #endif
7082     abi_long ret;
7083     int host_cmd = target_to_host_fcntl_cmd(cmd);
7084 
7085     if (host_cmd == -TARGET_EINVAL)
7086         return host_cmd;
7087 
7088     switch(cmd) {
7089     case TARGET_F_GETLK:
7090         ret = copy_from_user_flock(&fl64, arg);
7091         if (ret) {
7092             return ret;
7093         }
7094         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7095         if (ret == 0) {
7096             ret = copy_to_user_flock(arg, &fl64);
7097         }
7098         break;
7099 
7100     case TARGET_F_SETLK:
7101     case TARGET_F_SETLKW:
7102         ret = copy_from_user_flock(&fl64, arg);
7103         if (ret) {
7104             return ret;
7105         }
7106         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7107         break;
7108 
7109     case TARGET_F_GETLK64:
7110     case TARGET_F_OFD_GETLK:
7111         ret = copy_from_user_flock64(&fl64, arg);
7112         if (ret) {
7113             return ret;
7114         }
7115         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7116         if (ret == 0) {
7117             ret = copy_to_user_flock64(arg, &fl64);
7118         }
7119         break;
7120     case TARGET_F_SETLK64:
7121     case TARGET_F_SETLKW64:
7122     case TARGET_F_OFD_SETLK:
7123     case TARGET_F_OFD_SETLKW:
7124         ret = copy_from_user_flock64(&fl64, arg);
7125         if (ret) {
7126             return ret;
7127         }
7128         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7129         break;
7130 
7131     case TARGET_F_GETFL:
7132         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7133         if (ret >= 0) {
7134             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7135             /* Tell 32-bit guests the fd uses O_LARGEFILE on 64-bit hosts. */
7136             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7137                 ret |= TARGET_O_LARGEFILE;
7138             }
7139         }
7140         break;
7141 
7142     case TARGET_F_SETFL:
7143         ret = get_errno(safe_fcntl(fd, host_cmd,
7144                                    target_to_host_bitmask(arg,
7145                                                           fcntl_flags_tbl)));
7146         break;
7147 
7148 #ifdef F_GETOWN_EX
7149     case TARGET_F_GETOWN_EX:
7150         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7151         if (ret >= 0) {
7152             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7153                 return -TARGET_EFAULT;
7154             target_fox->type = tswap32(fox.type);
7155             target_fox->pid = tswap32(fox.pid);
7156             unlock_user_struct(target_fox, arg, 1);
7157         }
7158         break;
7159 #endif
7160 
7161 #ifdef F_SETOWN_EX
7162     case TARGET_F_SETOWN_EX:
7163         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7164             return -TARGET_EFAULT;
7165         fox.type = tswap32(target_fox->type);
7166         fox.pid = tswap32(target_fox->pid);
7167         unlock_user_struct(target_fox, arg, 0);
7168         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7169         break;
7170 #endif
7171 
7172     case TARGET_F_SETSIG:
7173         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7174         break;
7175 
7176     case TARGET_F_GETSIG:
7177         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7178         break;
7179 
7180     case TARGET_F_SETOWN:
7181     case TARGET_F_GETOWN:
7182     case TARGET_F_SETLEASE:
7183     case TARGET_F_GETLEASE:
7184     case TARGET_F_SETPIPE_SZ:
7185     case TARGET_F_GETPIPE_SZ:
7186     case TARGET_F_ADD_SEALS:
7187     case TARGET_F_GET_SEALS:
7188         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7189         break;
7190 
7191     default:
7192         ret = get_errno(safe_fcntl(fd, cmd, arg));
7193         break;
7194     }
7195     return ret;
7196 }
7197 
7198 #ifdef USE_UID16
7199 
7200 static inline int high2lowuid(int uid)
7201 {
7202     if (uid > 65535)
7203         return 65534;
7204     else
7205         return uid;
7206 }
7207 
7208 static inline int high2lowgid(int gid)
7209 {
7210     if (gid > 65535)
7211         return 65534;
7212     else
7213         return gid;
7214 }
7215 
7216 static inline int low2highuid(int uid)
7217 {
7218     if ((int16_t)uid == -1)
7219         return -1;
7220     else
7221         return uid;
7222 }
7223 
7224 static inline int low2highgid(int gid)
7225 {
7226     if ((int16_t)gid == -1)
7227         return -1;
7228     else
7229         return gid;
7230 }
7231 static inline int tswapid(int id)
7232 {
7233     return tswap16(id);
7234 }
7235 
7236 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7237 
7238 #else /* !USE_UID16 */
7239 static inline int high2lowuid(int uid)
7240 {
7241     return uid;
7242 }
7243 static inline int high2lowgid(int gid)
7244 {
7245     return gid;
7246 }
7247 static inline int low2highuid(int uid)
7248 {
7249     return uid;
7250 }
7251 static inline int low2highgid(int gid)
7252 {
7253     return gid;
7254 }
7255 static inline int tswapid(int id)
7256 {
7257     return tswap32(id);
7258 }
7259 
7260 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7261 
7262 #endif /* USE_UID16 */
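
/*
 * With USE_UID16 the target ABI only carries 16-bit IDs: values above 65535
 * are clamped to 65534 (the traditional overflow ID), while 0xffff is mapped
 * back to -1 so that the "leave unchanged" value survives the widening.
 */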
7263 
7264 /* We must do direct syscalls for setting UID/GID, because we want to
7265  * implement the Linux system call semantics of "change only for this thread",
7266  * not the libc/POSIX semantics of "change for all threads in process".
7267  * (See http://ewontfix.com/17/ for more details.)
7268  * We use the 32-bit version of the syscalls if present; if it is not
7269  * then either the host architecture supports 32-bit UIDs natively with
7270  * the standard syscall, or the 16-bit UID is the best we can do.
7271  */
7272 #ifdef __NR_setuid32
7273 #define __NR_sys_setuid __NR_setuid32
7274 #else
7275 #define __NR_sys_setuid __NR_setuid
7276 #endif
7277 #ifdef __NR_setgid32
7278 #define __NR_sys_setgid __NR_setgid32
7279 #else
7280 #define __NR_sys_setgid __NR_setgid
7281 #endif
7282 #ifdef __NR_setresuid32
7283 #define __NR_sys_setresuid __NR_setresuid32
7284 #else
7285 #define __NR_sys_setresuid __NR_setresuid
7286 #endif
7287 #ifdef __NR_setresgid32
7288 #define __NR_sys_setresgid __NR_setresgid32
7289 #else
7290 #define __NR_sys_setresgid __NR_setresgid
7291 #endif
7292 
7293 _syscall1(int, sys_setuid, uid_t, uid)
7294 _syscall1(int, sys_setgid, gid_t, gid)
7295 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7296 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
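
/*
 * These raw wrappers are used instead of the libc set*id() functions so that
 * the change only applies to the calling thread, as explained above; callers
 * wrap them in get_errno() like any other direct syscall.
 */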
7297 
7298 void syscall_init(void)
7299 {
7300     IOCTLEntry *ie;
7301     const argtype *arg_type;
7302     int size;
7303 
7304     thunk_init(STRUCT_MAX);
7305 
7306 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7307 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7308 #include "syscall_types.h"
7309 #undef STRUCT
7310 #undef STRUCT_SPECIAL
7311 
7312     /* We patch the ioctl size if necessary.  We rely on the fact that
7313        no ioctl has all bits set to '1' in its size field. */
7314     ie = ioctl_entries;
7315     while (ie->target_cmd != 0) {
7316         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7317             TARGET_IOC_SIZEMASK) {
7318             arg_type = ie->arg_type;
7319             if (arg_type[0] != TYPE_PTR) {
7320                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7321                         ie->target_cmd);
7322                 exit(1);
7323             }
7324             arg_type++;
7325             size = thunk_type_size(arg_type, 0);
7326             ie->target_cmd = (ie->target_cmd &
7327                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7328                 (size << TARGET_IOC_SIZESHIFT);
7329         }
7330 
7331         /* automatic consistency check if same arch */
7332 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7333     (defined(__x86_64__) && defined(TARGET_X86_64))
7334         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7335             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7336                     ie->name, ie->target_cmd, ie->host_cmd);
7337         }
7338 #endif
7339         ie++;
7340     }
7341 }
7342 
7343 #ifdef TARGET_NR_truncate64
7344 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7345                                          abi_long arg2,
7346                                          abi_long arg3,
7347                                          abi_long arg4)
7348 {
7349     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7350         arg2 = arg3;
7351         arg3 = arg4;
7352     }
7353     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7354 }
7355 #endif
7356 
7357 #ifdef TARGET_NR_ftruncate64
7358 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7359                                           abi_long arg2,
7360                                           abi_long arg3,
7361                                           abi_long arg4)
7362 {
7363     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7364         arg2 = arg3;
7365         arg3 = arg4;
7366     }
7367     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7368 }
7369 #endif
7370 
7371 #if defined(TARGET_NR_timer_settime) || \
7372     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7373 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7374                                                  abi_ulong target_addr)
7375 {
7376     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7377                                 offsetof(struct target_itimerspec,
7378                                          it_interval)) ||
7379         target_to_host_timespec(&host_its->it_value, target_addr +
7380                                 offsetof(struct target_itimerspec,
7381                                          it_value))) {
7382         return -TARGET_EFAULT;
7383     }
7384 
7385     return 0;
7386 }
7387 #endif
7388 
7389 #if defined(TARGET_NR_timer_settime64) || \
7390     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7391 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7392                                                    abi_ulong target_addr)
7393 {
7394     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7395                                   offsetof(struct target__kernel_itimerspec,
7396                                            it_interval)) ||
7397         target_to_host_timespec64(&host_its->it_value, target_addr +
7398                                   offsetof(struct target__kernel_itimerspec,
7399                                            it_value))) {
7400         return -TARGET_EFAULT;
7401     }
7402 
7403     return 0;
7404 }
7405 #endif
7406 
7407 #if ((defined(TARGET_NR_timerfd_gettime) || \
7408       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7409       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7410 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7411                                                  struct itimerspec *host_its)
7412 {
7413     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7414                                                        it_interval),
7415                                 &host_its->it_interval) ||
7416         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7417                                                        it_value),
7418                                 &host_its->it_value)) {
7419         return -TARGET_EFAULT;
7420     }
7421     return 0;
7422 }
7423 #endif
7424 
7425 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7426       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7427       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7428 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7429                                                    struct itimerspec *host_its)
7430 {
7431     if (host_to_target_timespec64(target_addr +
7432                                   offsetof(struct target__kernel_itimerspec,
7433                                            it_interval),
7434                                   &host_its->it_interval) ||
7435         host_to_target_timespec64(target_addr +
7436                                   offsetof(struct target__kernel_itimerspec,
7437                                            it_value),
7438                                   &host_its->it_value)) {
7439         return -TARGET_EFAULT;
7440     }
7441     return 0;
7442 }
7443 #endif
7444 
7445 #if defined(TARGET_NR_adjtimex) || \
7446     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7447 static inline abi_long target_to_host_timex(struct timex *host_tx,
7448                                             abi_long target_addr)
7449 {
7450     struct target_timex *target_tx;
7451 
7452     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7453         return -TARGET_EFAULT;
7454     }
7455 
7456     __get_user(host_tx->modes, &target_tx->modes);
7457     __get_user(host_tx->offset, &target_tx->offset);
7458     __get_user(host_tx->freq, &target_tx->freq);
7459     __get_user(host_tx->maxerror, &target_tx->maxerror);
7460     __get_user(host_tx->esterror, &target_tx->esterror);
7461     __get_user(host_tx->status, &target_tx->status);
7462     __get_user(host_tx->constant, &target_tx->constant);
7463     __get_user(host_tx->precision, &target_tx->precision);
7464     __get_user(host_tx->tolerance, &target_tx->tolerance);
7465     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7466     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7467     __get_user(host_tx->tick, &target_tx->tick);
7468     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7469     __get_user(host_tx->jitter, &target_tx->jitter);
7470     __get_user(host_tx->shift, &target_tx->shift);
7471     __get_user(host_tx->stabil, &target_tx->stabil);
7472     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7473     __get_user(host_tx->calcnt, &target_tx->calcnt);
7474     __get_user(host_tx->errcnt, &target_tx->errcnt);
7475     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7476     __get_user(host_tx->tai, &target_tx->tai);
7477 
7478     unlock_user_struct(target_tx, target_addr, 0);
7479     return 0;
7480 }
7481 
7482 static inline abi_long host_to_target_timex(abi_long target_addr,
7483                                             struct timex *host_tx)
7484 {
7485     struct target_timex *target_tx;
7486 
7487     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7488         return -TARGET_EFAULT;
7489     }
7490 
7491     __put_user(host_tx->modes, &target_tx->modes);
7492     __put_user(host_tx->offset, &target_tx->offset);
7493     __put_user(host_tx->freq, &target_tx->freq);
7494     __put_user(host_tx->maxerror, &target_tx->maxerror);
7495     __put_user(host_tx->esterror, &target_tx->esterror);
7496     __put_user(host_tx->status, &target_tx->status);
7497     __put_user(host_tx->constant, &target_tx->constant);
7498     __put_user(host_tx->precision, &target_tx->precision);
7499     __put_user(host_tx->tolerance, &target_tx->tolerance);
7500     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7501     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7502     __put_user(host_tx->tick, &target_tx->tick);
7503     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7504     __put_user(host_tx->jitter, &target_tx->jitter);
7505     __put_user(host_tx->shift, &target_tx->shift);
7506     __put_user(host_tx->stabil, &target_tx->stabil);
7507     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7508     __put_user(host_tx->calcnt, &target_tx->calcnt);
7509     __put_user(host_tx->errcnt, &target_tx->errcnt);
7510     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7511     __put_user(host_tx->tai, &target_tx->tai);
7512 
7513     unlock_user_struct(target_tx, target_addr, 1);
7514     return 0;
7515 }
7516 #endif
7517 
7518 
7519 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7520 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7521                                               abi_long target_addr)
7522 {
7523     struct target__kernel_timex *target_tx;
7524 
7525     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7526                                  offsetof(struct target__kernel_timex,
7527                                           time))) {
7528         return -TARGET_EFAULT;
7529     }
7530 
7531     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7532         return -TARGET_EFAULT;
7533     }
7534 
7535     __get_user(host_tx->modes, &target_tx->modes);
7536     __get_user(host_tx->offset, &target_tx->offset);
7537     __get_user(host_tx->freq, &target_tx->freq);
7538     __get_user(host_tx->maxerror, &target_tx->maxerror);
7539     __get_user(host_tx->esterror, &target_tx->esterror);
7540     __get_user(host_tx->status, &target_tx->status);
7541     __get_user(host_tx->constant, &target_tx->constant);
7542     __get_user(host_tx->precision, &target_tx->precision);
7543     __get_user(host_tx->tolerance, &target_tx->tolerance);
7544     __get_user(host_tx->tick, &target_tx->tick);
7545     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7546     __get_user(host_tx->jitter, &target_tx->jitter);
7547     __get_user(host_tx->shift, &target_tx->shift);
7548     __get_user(host_tx->stabil, &target_tx->stabil);
7549     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7550     __get_user(host_tx->calcnt, &target_tx->calcnt);
7551     __get_user(host_tx->errcnt, &target_tx->errcnt);
7552     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7553     __get_user(host_tx->tai, &target_tx->tai);
7554 
7555     unlock_user_struct(target_tx, target_addr, 0);
7556     return 0;
7557 }
7558 
7559 static inline abi_long host_to_target_timex64(abi_long target_addr,
7560                                               struct timex *host_tx)
7561 {
7562     struct target__kernel_timex *target_tx;
7563 
7564     if (copy_to_user_timeval64(target_addr +
7565                                offsetof(struct target__kernel_timex, time),
7566                                &host_tx->time)) {
7567         return -TARGET_EFAULT;
7568     }
7569 
7570     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7571         return -TARGET_EFAULT;
7572     }
7573 
7574     __put_user(host_tx->modes, &target_tx->modes);
7575     __put_user(host_tx->offset, &target_tx->offset);
7576     __put_user(host_tx->freq, &target_tx->freq);
7577     __put_user(host_tx->maxerror, &target_tx->maxerror);
7578     __put_user(host_tx->esterror, &target_tx->esterror);
7579     __put_user(host_tx->status, &target_tx->status);
7580     __put_user(host_tx->constant, &target_tx->constant);
7581     __put_user(host_tx->precision, &target_tx->precision);
7582     __put_user(host_tx->tolerance, &target_tx->tolerance);
7583     __put_user(host_tx->tick, &target_tx->tick);
7584     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7585     __put_user(host_tx->jitter, &target_tx->jitter);
7586     __put_user(host_tx->shift, &target_tx->shift);
7587     __put_user(host_tx->stabil, &target_tx->stabil);
7588     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7589     __put_user(host_tx->calcnt, &target_tx->calcnt);
7590     __put_user(host_tx->errcnt, &target_tx->errcnt);
7591     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7592     __put_user(host_tx->tai, &target_tx->tai);
7593 
7594     unlock_user_struct(target_tx, target_addr, 1);
7595     return 0;
7596 }
7597 #endif
7598 
7599 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7600 #define sigev_notify_thread_id _sigev_un._tid
7601 #endif
7602 
7603 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7604                                                abi_ulong target_addr)
7605 {
7606     struct target_sigevent *target_sevp;
7607 
7608     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7609         return -TARGET_EFAULT;
7610     }
7611 
7612     /* This union is awkward on 64 bit systems because it has a 32 bit
7613      * integer and a pointer in it; we follow the conversion approach
7614      * used for handling sigval types in signal.c so the guest should get
7615      * the correct value back even if we did a 64 bit byteswap and it's
7616      * using the 32 bit integer.
7617      */
7618     host_sevp->sigev_value.sival_ptr =
7619         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7620     host_sevp->sigev_signo =
7621         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7622     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7623     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7624 
7625     unlock_user_struct(target_sevp, target_addr, 1);
7626     return 0;
7627 }
7628 
7629 #if defined(TARGET_NR_mlockall)
7630 static inline int target_to_host_mlockall_arg(int arg)
7631 {
7632     int result = 0;
7633 
7634     if (arg & TARGET_MCL_CURRENT) {
7635         result |= MCL_CURRENT;
7636     }
7637     if (arg & TARGET_MCL_FUTURE) {
7638         result |= MCL_FUTURE;
7639     }
7640 #ifdef MCL_ONFAULT
7641     if (arg & TARGET_MCL_ONFAULT) {
7642         result |= MCL_ONFAULT;
7643     }
7644 #endif
7645 
7646     return result;
7647 }
7648 #endif
7649 
7650 static inline int target_to_host_msync_arg(abi_long arg)
7651 {
7652     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7653            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7654            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7655            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7656 }
7657 
7658 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7659      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7660      defined(TARGET_NR_newfstatat))
7661 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7662                                              abi_ulong target_addr,
7663                                              struct stat *host_st)
7664 {
7665 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7666     if (cpu_env->eabi) {
7667         struct target_eabi_stat64 *target_st;
7668 
7669         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7670             return -TARGET_EFAULT;
7671         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7672         __put_user(host_st->st_dev, &target_st->st_dev);
7673         __put_user(host_st->st_ino, &target_st->st_ino);
7674 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7675         __put_user(host_st->st_ino, &target_st->__st_ino);
7676 #endif
7677         __put_user(host_st->st_mode, &target_st->st_mode);
7678         __put_user(host_st->st_nlink, &target_st->st_nlink);
7679         __put_user(host_st->st_uid, &target_st->st_uid);
7680         __put_user(host_st->st_gid, &target_st->st_gid);
7681         __put_user(host_st->st_rdev, &target_st->st_rdev);
7682         __put_user(host_st->st_size, &target_st->st_size);
7683         __put_user(host_st->st_blksize, &target_st->st_blksize);
7684         __put_user(host_st->st_blocks, &target_st->st_blocks);
7685         __put_user(host_st->st_atime, &target_st->target_st_atime);
7686         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7687         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7688 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7689         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7690         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7691         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7692 #endif
7693         unlock_user_struct(target_st, target_addr, 1);
7694     } else
7695 #endif
7696     {
7697 #if defined(TARGET_HAS_STRUCT_STAT64)
7698         struct target_stat64 *target_st;
7699 #else
7700         struct target_stat *target_st;
7701 #endif
7702 
7703         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7704             return -TARGET_EFAULT;
7705         memset(target_st, 0, sizeof(*target_st));
7706         __put_user(host_st->st_dev, &target_st->st_dev);
7707         __put_user(host_st->st_ino, &target_st->st_ino);
7708 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7709         __put_user(host_st->st_ino, &target_st->__st_ino);
7710 #endif
7711         __put_user(host_st->st_mode, &target_st->st_mode);
7712         __put_user(host_st->st_nlink, &target_st->st_nlink);
7713         __put_user(host_st->st_uid, &target_st->st_uid);
7714         __put_user(host_st->st_gid, &target_st->st_gid);
7715         __put_user(host_st->st_rdev, &target_st->st_rdev);
7716         /* XXX: better use of kernel struct */
7717         __put_user(host_st->st_size, &target_st->st_size);
7718         __put_user(host_st->st_blksize, &target_st->st_blksize);
7719         __put_user(host_st->st_blocks, &target_st->st_blocks);
7720         __put_user(host_st->st_atime, &target_st->target_st_atime);
7721         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7722         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7723 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7724         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7725         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7726         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7727 #endif
7728         unlock_user_struct(target_st, target_addr, 1);
7729     }
7730 
7731     return 0;
7732 }
7733 #endif
7734 
7735 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7736 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7737                                             abi_ulong target_addr)
7738 {
7739     struct target_statx *target_stx;
7740 
7741     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7742         return -TARGET_EFAULT;
7743     }
7744     memset(target_stx, 0, sizeof(*target_stx));
7745 
7746     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7747     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7748     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7749     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7750     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7751     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7752     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7753     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7754     __put_user(host_stx->stx_size, &target_stx->stx_size);
7755     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7756     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7757     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7758     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7759     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7760     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7761     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7762     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7763     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7764     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7765     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7766     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7767     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7768     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7769 
7770     unlock_user_struct(target_stx, target_addr, 1);
7771 
7772     return 0;
7773 }
7774 #endif
7775 
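/*
 * Issue the raw host futex syscall, selecting between __NR_futex and
 * __NR_futex_time64 so that the timespec layout matches the host's
 * time_t width.  64-bit hosts only provide the plain variant.
 */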
7776 static int do_sys_futex(int *uaddr, int op, int val,
7777                          const struct timespec *timeout, int *uaddr2,
7778                          int val3)
7779 {
7780 #if HOST_LONG_BITS == 64
7781 #if defined(__NR_futex)
7782     /* 64-bit hosts always have a 64-bit time_t, so there is no _time64 variant */
7783     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7784 
7785 #endif
7786 #else /* HOST_LONG_BITS == 64 */
7787 #if defined(__NR_futex_time64)
7788     if (sizeof(timeout->tv_sec) == 8) {
7789         /* _time64 function on 32bit arch */
7790         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7791     }
7792 #endif
7793 #if defined(__NR_futex)
7794     /* old function on 32bit arch */
7795     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7796 #endif
7797 #endif /* HOST_LONG_BITS == 64 */
7798     g_assert_not_reached();
7799 }
7800 
7801 static int do_safe_futex(int *uaddr, int op, int val,
7802                          const struct timespec *timeout, int *uaddr2,
7803                          int val3)
7804 {
7805 #if HOST_LONG_BITS == 64
7806 #if defined(__NR_futex)
7807     /* 64-bit hosts always have a 64-bit time_t, so there is no _time64 variant */
7808     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7809 #endif
7810 #else /* HOST_LONG_BITS == 64 */
7811 #if defined(__NR_futex_time64)
7812     if (sizeof(timeout->tv_sec) == 8) {
7813         /* _time64 function on 32bit arch */
7814         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7815                                            val3));
7816     }
7817 #endif
7818 #if defined(__NR_futex)
7819     /* old function on 32bit arch */
7820     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7821 #endif
7822 #endif /* HOST_LONG_BITS == 64 */
7823     return -TARGET_ENOSYS;
7824 }
7825 
7826 /* ??? Using host futex calls even when target atomic operations
7827    are not really atomic probably breaks things.  However, implementing
7828    futexes locally would make it tricky to share futexes between multiple
7829    processes.  They are probably useless anyway, because guest atomic
7830    operations will not work either.  */
7831 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7832 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7833                     int op, int val, target_ulong timeout,
7834                     target_ulong uaddr2, int val3)
7835 {
7836     struct timespec ts, *pts = NULL;
7837     void *haddr2 = NULL;
7838     int base_op;
7839 
7840     /* We assume FUTEX_* constants are the same on both host and target. */
7841 #ifdef FUTEX_CMD_MASK
7842     base_op = op & FUTEX_CMD_MASK;
7843 #else
7844     base_op = op;
7845 #endif
7846     switch (base_op) {
7847     case FUTEX_WAIT:
7848     case FUTEX_WAIT_BITSET:
7849         val = tswap32(val);
7850         break;
7851     case FUTEX_WAIT_REQUEUE_PI:
7852         val = tswap32(val);
7853         haddr2 = g2h(cpu, uaddr2);
7854         break;
7855     case FUTEX_LOCK_PI:
7856     case FUTEX_LOCK_PI2:
7857         break;
7858     case FUTEX_WAKE:
7859     case FUTEX_WAKE_BITSET:
7860     case FUTEX_TRYLOCK_PI:
7861     case FUTEX_UNLOCK_PI:
7862         timeout = 0;
7863         break;
7864     case FUTEX_FD:
7865         val = target_to_host_signal(val);
7866         timeout = 0;
7867         break;
7868     case FUTEX_CMP_REQUEUE:
7869     case FUTEX_CMP_REQUEUE_PI:
7870         val3 = tswap32(val3);
7871         /* fall through */
7872     case FUTEX_REQUEUE:
7873     case FUTEX_WAKE_OP:
7874         /*
7875          * For these, the 4th argument is not TIMEOUT, but VAL2.
7876          * But the prototype of do_safe_futex takes a pointer, so
7877          * insert casts to satisfy the compiler.  We do not need
7878          * to tswap VAL2 since it's not compared to guest memory.
7879          */
7880         pts = (struct timespec *)(uintptr_t)timeout;
7881         timeout = 0;
7882         haddr2 = g2h(cpu, uaddr2);
7883         break;
7884     default:
7885         return -TARGET_ENOSYS;
7886     }
7887     if (timeout) {
7888         pts = &ts;
7889         if (time64
7890             ? target_to_host_timespec64(pts, timeout)
7891             : target_to_host_timespec(pts, timeout)) {
7892             return -TARGET_EFAULT;
7893         }
7894     }
7895     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7896 }
7897 #endif
7898 
7899 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
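/*
 * name_to_handle_at: read handle_bytes from the guest's file_handle,
 * call the host syscall on the translated path, then copy the opaque
 * handle back to the guest with the handle_bytes and handle_type
 * header fields byte-swapped, and store the mount id.
 */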
7900 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7901                                      abi_long handle, abi_long mount_id,
7902                                      abi_long flags)
7903 {
7904     struct file_handle *target_fh;
7905     struct file_handle *fh;
7906     int mid = 0;
7907     abi_long ret;
7908     char *name;
7909     unsigned int size, total_size;
7910 
7911     if (get_user_s32(size, handle)) {
7912         return -TARGET_EFAULT;
7913     }
7914 
7915     name = lock_user_string(pathname);
7916     if (!name) {
7917         return -TARGET_EFAULT;
7918     }
7919 
7920     total_size = sizeof(struct file_handle) + size;
7921     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7922     if (!target_fh) {
7923         unlock_user(name, pathname, 0);
7924         return -TARGET_EFAULT;
7925     }
7926 
7927     fh = g_malloc0(total_size);
7928     fh->handle_bytes = size;
7929 
7930     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7931     unlock_user(name, pathname, 0);
7932 
7933     /* man name_to_handle_at(2):
7934      * Other than the use of the handle_bytes field, the caller should treat
7935      * the file_handle structure as an opaque data type
7936      */
7937 
7938     memcpy(target_fh, fh, total_size);
7939     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7940     target_fh->handle_type = tswap32(fh->handle_type);
7941     g_free(fh);
7942     unlock_user(target_fh, handle, total_size);
7943 
7944     if (put_user_s32(mid, mount_id)) {
7945         return -TARGET_EFAULT;
7946     }
7947 
7948     return ret;
7949 
7950 }
7951 #endif
7952 
7953 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7954 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7955                                      abi_long flags)
7956 {
7957     struct file_handle *target_fh;
7958     struct file_handle *fh;
7959     unsigned int size, total_size;
7960     abi_long ret;
7961 
7962     if (get_user_s32(size, handle)) {
7963         return -TARGET_EFAULT;
7964     }
7965 
7966     total_size = sizeof(struct file_handle) + size;
7967     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7968     if (!target_fh) {
7969         return -TARGET_EFAULT;
7970     }
7971 
7972     fh = g_memdup(target_fh, total_size);
7973     fh->handle_bytes = size;
7974     fh->handle_type = tswap32(target_fh->handle_type);
7975 
7976     ret = get_errno(open_by_handle_at(mount_fd, fh,
7977                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7978 
7979     g_free(fh);
7980 
7981     unlock_user(target_fh, handle, total_size);
7982 
7983     return ret;
7984 }
7985 #endif
7986 
7987 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7988 
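/*
 * signalfd/signalfd4: convert the guest signal mask and flags, create
 * the host signalfd, and register an fd translator so that the
 * signalfd_siginfo records read back are converted to target format.
 */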
7989 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7990 {
7991     int host_flags;
7992     target_sigset_t *target_mask;
7993     sigset_t host_mask;
7994     abi_long ret;
7995 
7996     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7997         return -TARGET_EINVAL;
7998     }
7999     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
8000         return -TARGET_EFAULT;
8001     }
8002 
8003     target_to_host_sigset(&host_mask, target_mask);
8004 
8005     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8006 
8007     ret = get_errno(signalfd(fd, &host_mask, host_flags));
8008     if (ret >= 0) {
8009         fd_trans_register(ret, &target_signalfd_trans);
8010     }
8011 
8012     unlock_user_struct(target_mask, mask, 0);
8013 
8014     return ret;
8015 }
8016 #endif
8017 
8018 /* Map host to target signal numbers for the wait family of syscalls.
8019    Assume all other status bits are the same.  */
8020 int host_to_target_waitstatus(int status)
8021 {
8022     if (WIFSIGNALED(status)) {
8023         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8024     }
8025     if (WIFSTOPPED(status)) {
8026         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8027                | (status & 0xff);
8028     }
8029     return status;
8030 }
8031 
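/*
 * Synthesize /proc/self/cmdline from the argv saved at exec time;
 * each argument is written including its terminating NUL byte.
 */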
8032 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8033 {
8034     CPUState *cpu = env_cpu(cpu_env);
8035     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8036     int i;
8037 
8038     for (i = 0; i < bprm->argc; i++) {
8039         size_t len = strlen(bprm->argv[i]) + 1;
8040 
8041         if (write(fd, bprm->argv[i], len) != len) {
8042             return -1;
8043         }
8044     }
8045 
8046     return 0;
8047 }
8048 
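/*
 * Emit one /proc/self/smaps stanza for a mapping of the given size.
 * Only Size, KernelPageSize and MMUPageSize carry real values; the
 * remaining counters are reported as zero since QEMU does not track
 * them for the guest.
 */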
8049 static void show_smaps(int fd, unsigned long size)
8050 {
8051     unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8052     unsigned long size_kb = size >> 10;
8053 
8054     dprintf(fd, "Size:                  %lu kB\n"
8055                 "KernelPageSize:        %lu kB\n"
8056                 "MMUPageSize:           %lu kB\n"
8057                 "Rss:                   0 kB\n"
8058                 "Pss:                   0 kB\n"
8059                 "Pss_Dirty:             0 kB\n"
8060                 "Shared_Clean:          0 kB\n"
8061                 "Shared_Dirty:          0 kB\n"
8062                 "Private_Clean:         0 kB\n"
8063                 "Private_Dirty:         0 kB\n"
8064                 "Referenced:            0 kB\n"
8065                 "Anonymous:             0 kB\n"
8066                 "LazyFree:              0 kB\n"
8067                 "AnonHugePages:         0 kB\n"
8068                 "ShmemPmdMapped:        0 kB\n"
8069                 "FilePmdMapped:         0 kB\n"
8070                 "Shared_Hugetlb:        0 kB\n"
8071                 "Private_Hugetlb:       0 kB\n"
8072                 "Swap:                  0 kB\n"
8073                 "SwapPss:               0 kB\n"
8074                 "Locked:                0 kB\n"
8075                 "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
8076 }
8077 
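/*
 * Build /proc/self/maps (and, for smaps, the per-mapping detail) for
 * the guest: walk the host's own mappings, keep only ranges that are
 * valid guest addresses, and print them using guest addresses and the
 * guest page flags.  A [vsyscall] entry is appended for targets that
 * provide one.
 */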
8078 static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
8079 {
8080     CPUState *cpu = env_cpu(cpu_env);
8081     TaskState *ts = cpu->opaque;
8082     GSList *map_info = read_self_maps();
8083     GSList *s;
8084     int count;
8085 
8086     for (s = map_info; s; s = g_slist_next(s)) {
8087         MapInfo *e = (MapInfo *) s->data;
8088 
8089         if (h2g_valid(e->start)) {
8090             unsigned long min = e->start;
8091             unsigned long max = e->end;
8092             int flags = page_get_flags(h2g(min));
8093             const char *path;
8094 
8095             max = h2g_valid(max - 1) ?
8096                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8097 
8098             if (page_check_range(h2g(min), max - min, flags) == -1) {
8099                 continue;
8100             }
8101 
8102 #ifdef TARGET_HPPA
8103             if (h2g(max) == ts->info->stack_limit) {
8104 #else
8105             if (h2g(min) == ts->info->stack_limit) {
8106 #endif
8107                 path = "[stack]";
8108             } else {
8109                 path = e->path;
8110             }
8111 
8112             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8113                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8114                             h2g(min), h2g(max - 1) + 1,
8115                             (flags & PAGE_READ) ? 'r' : '-',
8116                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8117                             (flags & PAGE_EXEC) ? 'x' : '-',
8118                             e->is_priv ? 'p' : 's',
8119                             (uint64_t) e->offset, e->dev, e->inode);
8120             if (path) {
8121                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8122             } else {
8123                 dprintf(fd, "\n");
8124             }
8125             if (smaps) {
8126                 show_smaps(fd, max - min);
8127                 dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
8128                         (flags & PAGE_READ) ? " rd" : "",
8129                         (flags & PAGE_WRITE_ORG) ? " wr" : "",
8130                         (flags & PAGE_EXEC) ? " ex" : "",
8131                         e->is_priv ? "" : " sh",
8132                         (flags & PAGE_READ) ? " mr" : "",
8133                         (flags & PAGE_WRITE_ORG) ? " mw" : "",
8134                         (flags & PAGE_EXEC) ? " me" : "",
8135                         e->is_priv ? "" : " ms");
8136             }
8137         }
8138     }
8139 
8140     free_self_maps(map_info);
8141 
8142 #ifdef TARGET_VSYSCALL_PAGE
8143     /*
8144      * We only support execution from the vsyscall page.
8145      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8146      */
8147     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8148                     " --xp 00000000 00:00 0",
8149                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8150     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8151     if (smaps) {
8152         show_smaps(fd, TARGET_PAGE_SIZE);
8153         dprintf(fd, "VmFlags: ex\n");
8154     }
8155 #endif
8156 
8157     return 0;
8158 }
8159 
8160 static int open_self_maps(CPUArchState *cpu_env, int fd)
8161 {
8162     return open_self_maps_1(cpu_env, fd, false);
8163 }
8164 
8165 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8166 {
8167     return open_self_maps_1(cpu_env, fd, true);
8168 }
8169 
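/*
 * Fabricate /proc/self/stat.  Only the pid, comm, state, ppid,
 * starttime and start-of-stack fields are filled in; all other
 * fields read as 0.
 */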
8170 static int open_self_stat(CPUArchState *cpu_env, int fd)
8171 {
8172     CPUState *cpu = env_cpu(cpu_env);
8173     TaskState *ts = cpu->opaque;
8174     g_autoptr(GString) buf = g_string_new(NULL);
8175     int i;
8176 
8177     for (i = 0; i < 44; i++) {
8178         if (i == 0) {
8179             /* pid */
8180             g_string_printf(buf, FMT_pid " ", getpid());
8181         } else if (i == 1) {
8182             /* app name */
8183             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8184             bin = bin ? bin + 1 : ts->bprm->argv[0];
8185             g_string_printf(buf, "(%.15s) ", bin);
8186         } else if (i == 2) {
8187             /* task state */
8188             g_string_assign(buf, "R "); /* we are running right now */
8189         } else if (i == 3) {
8190             /* ppid */
8191             g_string_printf(buf, FMT_pid " ", getppid());
8192         } else if (i == 21) {
8193             /* starttime */
8194             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8195         } else if (i == 27) {
8196             /* stack bottom */
8197             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8198         } else {
8199             /* for the rest, there is MasterCard: everything else reads as 0 */
8200             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8201         }
8202 
8203         if (write(fd, buf->str, buf->len) != buf->len) {
8204             return -1;
8205         }
8206     }
8207 
8208     return 0;
8209 }
8210 
8211 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8212 {
8213     CPUState *cpu = env_cpu(cpu_env);
8214     TaskState *ts = cpu->opaque;
8215     abi_ulong auxv = ts->info->saved_auxv;
8216     abi_ulong len = ts->info->auxv_len;
8217     char *ptr;
8218 
8219     /*
8220      * The auxiliary vector is stored on the target process's stack;
8221      * read the whole vector in and copy it out to the file.
8222      */
8223     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8224     if (ptr != NULL) {
8225         while (len > 0) {
8226             ssize_t r;
8227             r = write(fd, ptr, len);
8228             if (r <= 0) {
8229                 break;
8230             }
8231             len -= r;
8232             ptr += r;
8233         }
8234         lseek(fd, 0, SEEK_SET);
8235         unlock_user(ptr, auxv, len);
8236     }
8237 
8238     return 0;
8239 }
8240 
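/*
 * Return nonzero if filename refers to this process's own /proc entry,
 * i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 */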
8241 static int is_proc_myself(const char *filename, const char *entry)
8242 {
8243     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8244         filename += strlen("/proc/");
8245         if (!strncmp(filename, "self/", strlen("self/"))) {
8246             filename += strlen("self/");
8247         } else if (*filename >= '1' && *filename <= '9') {
8248             char myself[80];
8249             snprintf(myself, sizeof(myself), "%d/", getpid());
8250             if (!strncmp(filename, myself, strlen(myself))) {
8251                 filename += strlen(myself);
8252             } else {
8253                 return 0;
8254             }
8255         } else {
8256             return 0;
8257         }
8258         if (!strcmp(filename, entry)) {
8259             return 1;
8260         }
8261     }
8262     return 0;
8263 }
8264 
8265 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8266                       const char *fmt, int code)
8267 {
8268     if (logfile) {
8269         CPUState *cs = env_cpu(env);
8270 
8271         fprintf(logfile, fmt, code);
8272         fprintf(logfile, "Failing executable: %s\n", exec_path);
8273         cpu_dump_state(cs, logfile, 0);
8274         open_self_maps(env, fileno(logfile));
8275     }
8276 }
8277 
8278 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8279 {
8280     /* dump to console */
8281     excp_dump_file(stderr, env, fmt, code);
8282 
8283     /* dump to log file */
8284     if (qemu_log_separate()) {
8285         FILE *logfile = qemu_log_trylock();
8286 
8287         excp_dump_file(logfile, env, fmt, code);
8288         qemu_log_unlock(logfile);
8289     }
8290 }
8291 
8292 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8293     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8294     defined(TARGET_RISCV) || defined(TARGET_S390X)
8295 static int is_proc(const char *filename, const char *entry)
8296 {
8297     return strcmp(filename, entry) == 0;
8298 }
8299 #endif
8300 
8301 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
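/*
 * /proc/net/route prints addresses as host-endian hex, so when host
 * and target endianness differ we copy the file line by line and
 * byte-swap the destination, gateway and netmask fields.
 */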
8302 static int open_net_route(CPUArchState *cpu_env, int fd)
8303 {
8304     FILE *fp;
8305     char *line = NULL;
8306     size_t len = 0;
8307     ssize_t read;
8308 
8309     fp = fopen("/proc/net/route", "r");
8310     if (fp == NULL) {
8311         return -1;
8312     }
8313 
8314     /* read header */
8315 
8316     read = getline(&line, &len, fp);
8317     dprintf(fd, "%s", line);
8318 
8319     /* read routes */
8320 
8321     while ((read = getline(&line, &len, fp)) != -1) {
8322         char iface[16];
8323         uint32_t dest, gw, mask;
8324         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8325         int fields;
8326 
8327         fields = sscanf(line,
8328                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8329                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8330                         &mask, &mtu, &window, &irtt);
8331         if (fields != 11) {
8332             continue;
8333         }
8334         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8335                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8336                 metric, tswap32(mask), mtu, window, irtt);
8337     }
8338 
8339     free(line);
8340     fclose(fp);
8341 
8342     return 0;
8343 }
8344 #endif
8345 
8346 #if defined(TARGET_SPARC)
8347 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8348 {
8349     dprintf(fd, "type\t\t: sun4u\n");
8350     return 0;
8351 }
8352 #endif
8353 
8354 #if defined(TARGET_HPPA)
8355 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8356 {
8357     int i, num_cpus;
8358 
8359     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8360     for (i = 0; i < num_cpus; i++) {
8361         dprintf(fd, "processor\t: %d\n", i);
8362         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8363         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8364         dprintf(fd, "capabilities\t: os32\n");
8365         dprintf(fd, "model\t\t: 9000/778/B160L - "
8366                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8367     }
8368     return 0;
8369 }
8370 #endif
8371 
8372 #if defined(TARGET_RISCV)
8373 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8374 {
8375     int i;
8376     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8377     RISCVCPU *cpu = env_archcpu(cpu_env);
8378     const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
8379     char *isa_string = riscv_isa_string(cpu);
8380     const char *mmu;
8381 
8382     if (cfg->mmu) {
8383         mmu = (cpu_env->xl == MXL_RV32) ? "sv32"  : "sv48";
8384     } else {
8385         mmu = "none";
8386     }
8387 
8388     for (i = 0; i < num_cpus; i++) {
8389         dprintf(fd, "processor\t: %d\n", i);
8390         dprintf(fd, "hart\t\t: %d\n", i);
8391         dprintf(fd, "isa\t\t: %s\n", isa_string);
8392         dprintf(fd, "mmu\t\t: %s\n", mmu);
8393         dprintf(fd, "uarch\t\t: qemu\n\n");
8394     }
8395 
8396     g_free(isa_string);
8397     return 0;
8398 }
8399 #endif
8400 
8401 #if defined(TARGET_S390X)
8402 /*
8403  * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8404  * show in /proc/cpuinfo.
8405  *
8406  * Skip the following in order to match the missing support in op_ecag():
8407  * - show_cacheinfo().
8408  * - show_cpu_topology().
8409  * - show_cpu_mhz().
8410  *
8411  * Use fixed values for certain fields:
8412  * - bogomips per cpu - from a qemu-system-s390x run.
8413  * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8414  *
8415  * Keep the code structure close to arch/s390/kernel/processor.c.
8416  */
8417 
8418 static void show_facilities(int fd)
8419 {
8420     size_t sizeof_stfl_bytes = 2048;
8421     g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
8422     unsigned int bit;
8423 
8424     dprintf(fd, "facilities      :");
8425     s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
8426     for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
8427         if (test_be_bit(bit, stfl_bytes)) {
8428             dprintf(fd, " %d", bit);
8429         }
8430     }
8431     dprintf(fd, "\n");
8432 }
8433 
8434 static int cpu_ident(unsigned long n)
8435 {
8436     return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
8437                      n);
8438 }
8439 
8440 static void show_cpu_summary(CPUArchState *cpu_env, int fd)
8441 {
8442     S390CPUModel *model = env_archcpu(cpu_env)->model;
8443     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8444     uint32_t elf_hwcap = get_elf_hwcap();
8445     const char *hwcap_str;
8446     int i;
8447 
8448     dprintf(fd, "vendor_id       : IBM/S390\n"
8449                 "# processors    : %i\n"
8450                 "bogomips per cpu: 13370.00\n",
8451             num_cpus);
8452     dprintf(fd, "max thread id   : 0\n");
8453     dprintf(fd, "features\t: ");
8454     for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
8455         if (!(elf_hwcap & (1 << i))) {
8456             continue;
8457         }
8458         hwcap_str = elf_hwcap_str(i);
8459         if (hwcap_str) {
8460             dprintf(fd, "%s ", hwcap_str);
8461         }
8462     }
8463     dprintf(fd, "\n");
8464     show_facilities(fd);
8465     for (i = 0; i < num_cpus; i++) {
8466         dprintf(fd, "processor %d: "
8467                "version = %02X,  "
8468                "identification = %06X,  "
8469                "machine = %04X\n",
8470                i, model->cpu_ver, cpu_ident(i), model->def->type);
8471     }
8472 }
8473 
8474 static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
8475 {
8476     S390CPUModel *model = env_archcpu(cpu_env)->model;
8477 
8478     dprintf(fd, "version         : %02X\n", model->cpu_ver);
8479     dprintf(fd, "identification  : %06X\n", cpu_ident(n));
8480     dprintf(fd, "machine         : %04X\n", model->def->type);
8481 }
8482 
8483 static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
8484 {
8485     dprintf(fd, "\ncpu number      : %ld\n", n);
8486     show_cpu_ids(cpu_env, fd, n);
8487 }
8488 
8489 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8490 {
8491     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8492     int i;
8493 
8494     show_cpu_summary(cpu_env, fd);
8495     for (i = 0; i < num_cpus; i++) {
8496         show_cpuinfo(cpu_env, fd, i);
8497     }
8498     return 0;
8499 }
8500 #endif
8501 
8502 #if defined(TARGET_M68K)
8503 static int open_hardware(CPUArchState *cpu_env, int fd)
8504 {
8505     dprintf(fd, "Model:\t\tqemu-m68k\n");
8506     return 0;
8507 }
8508 #endif
8509 
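/*
 * Open a path on behalf of the guest.  /proc/self/exe is redirected to
 * the real executable, and a small table of /proc entries is faked by
 * writing synthesized contents into a memfd (or, failing that, an
 * unlinked temporary file) which is then handed to the guest.
 */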
8510 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8511                     int flags, mode_t mode, bool safe)
8512 {
8513     struct fake_open {
8514         const char *filename;
8515         int (*fill)(CPUArchState *cpu_env, int fd);
8516         int (*cmp)(const char *s1, const char *s2);
8517     };
8518     const struct fake_open *fake_open;
8519     static const struct fake_open fakes[] = {
8520         { "maps", open_self_maps, is_proc_myself },
8521         { "smaps", open_self_smaps, is_proc_myself },
8522         { "stat", open_self_stat, is_proc_myself },
8523         { "auxv", open_self_auxv, is_proc_myself },
8524         { "cmdline", open_self_cmdline, is_proc_myself },
8525 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8526         { "/proc/net/route", open_net_route, is_proc },
8527 #endif
8528 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
8529     defined(TARGET_RISCV) || defined(TARGET_S390X)
8530         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8531 #endif
8532 #if defined(TARGET_M68K)
8533         { "/proc/hardware", open_hardware, is_proc },
8534 #endif
8535         { NULL, NULL, NULL }
8536     };
8537 
8538     if (is_proc_myself(pathname, "exe")) {
8539         if (safe) {
8540             return safe_openat(dirfd, exec_path, flags, mode);
8541         } else {
8542             return openat(dirfd, exec_path, flags, mode);
8543         }
8544     }
8545 
8546     for (fake_open = fakes; fake_open->filename; fake_open++) {
8547         if (fake_open->cmp(pathname, fake_open->filename)) {
8548             break;
8549         }
8550     }
8551 
8552     if (fake_open->filename) {
8553         const char *tmpdir;
8554         char filename[PATH_MAX];
8555         int fd, r;
8556 
8557         fd = memfd_create("qemu-open", 0);
8558         if (fd < 0) {
8559             if (errno != ENOSYS) {
8560                 return fd;
8561             }
8562             /* create temporary file to map stat to */
8563             /* fall back to a temporary file to hold the synthesized contents */
8564             if (!tmpdir)
8565                 tmpdir = "/tmp";
8566             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8567             fd = mkstemp(filename);
8568             if (fd < 0) {
8569                 return fd;
8570             }
8571             unlink(filename);
8572         }
8573 
8574         if ((r = fake_open->fill(cpu_env, fd))) {
8575             int e = errno;
8576             close(fd);
8577             errno = e;
8578             return r;
8579         }
8580         lseek(fd, 0, SEEK_SET);
8581 
8582         return fd;
8583     }
8584 
8585     if (safe) {
8586         return safe_openat(dirfd, path(pathname), flags, mode);
8587     } else {
8588         return openat(dirfd, path(pathname), flags, mode);
8589     }
8590 }
8591 
8592 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8593 {
8594     ssize_t ret;
8595 
8596     if (!pathname || !buf) {
8597         errno = EFAULT;
8598         return -1;
8599     }
8600 
8601     if (!bufsiz) {
8602         /* Short circuit this for the magic exe check. */
8603         errno = EINVAL;
8604         return -1;
8605     }
8606 
8607     if (is_proc_myself((const char *)pathname, "exe")) {
8608         /*
8609          * Don't worry about sign mismatch as earlier mapping
8610          * logic would have thrown a bad address error.
8611          */
8612         ret = MIN(strlen(exec_path), bufsiz);
8613         /* We cannot NUL terminate the string. */
8614         memcpy(buf, exec_path, ret);
8615     } else {
8616         ret = readlink(path(pathname), buf, bufsiz);
8617     }
8618 
8619     return ret;
8620 }
8621 
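/*
 * execve/execveat: count the guest argv and envp pointer arrays, lock
 * each string into host memory, and invoke the host execveat through
 * the safe-syscall wrapper (see the comment below on why that matters
 * for signal handling).
 */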
8622 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8623                        abi_long pathname, abi_long guest_argp,
8624                        abi_long guest_envp, int flags)
8625 {
8626     int ret;
8627     char **argp, **envp;
8628     int argc, envc;
8629     abi_ulong gp;
8630     abi_ulong addr;
8631     char **q;
8632     void *p;
8633 
8634     argc = 0;
8635 
8636     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8637         if (get_user_ual(addr, gp)) {
8638             return -TARGET_EFAULT;
8639         }
8640         if (!addr) {
8641             break;
8642         }
8643         argc++;
8644     }
8645     envc = 0;
8646     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8647         if (get_user_ual(addr, gp)) {
8648             return -TARGET_EFAULT;
8649         }
8650         if (!addr) {
8651             break;
8652         }
8653         envc++;
8654     }
8655 
8656     argp = g_new0(char *, argc + 1);
8657     envp = g_new0(char *, envc + 1);
8658 
8659     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8660         if (get_user_ual(addr, gp)) {
8661             goto execve_efault;
8662         }
8663         if (!addr) {
8664             break;
8665         }
8666         *q = lock_user_string(addr);
8667         if (!*q) {
8668             goto execve_efault;
8669         }
8670     }
8671     *q = NULL;
8672 
8673     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8674         if (get_user_ual(addr, gp)) {
8675             goto execve_efault;
8676         }
8677         if (!addr) {
8678             break;
8679         }
8680         *q = lock_user_string(addr);
8681         if (!*q) {
8682             goto execve_efault;
8683         }
8684     }
8685     *q = NULL;
8686 
8687     /*
8688      * Although execve() is not an interruptible syscall it is
8689      * a special case where we must use the safe_syscall wrapper:
8690      * if we allow a signal to happen before we make the host
8691      * syscall then we will 'lose' it, because at the point of
8692      * execve the process leaves QEMU's control. So we use the
8693      * safe syscall wrapper to ensure that we either take the
8694      * signal as a guest signal, or else it does not happen
8695      * before the execve completes and makes it the other
8696      * program's problem.
8697      */
8698     p = lock_user_string(pathname);
8699     if (!p) {
8700         goto execve_efault;
8701     }
8702 
8703     if (is_proc_myself(p, "exe")) {
8704         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8705     } else {
8706         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8707     }
8708 
8709     unlock_user(p, pathname, 0);
8710 
8711     goto execve_end;
8712 
8713 execve_efault:
8714     ret = -TARGET_EFAULT;
8715 
8716 execve_end:
8717     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8718         if (get_user_ual(addr, gp) || !addr) {
8719             break;
8720         }
8721         unlock_user(*q, addr, 0);
8722     }
8723     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8724         if (get_user_ual(addr, gp) || !addr) {
8725             break;
8726         }
8727         unlock_user(*q, addr, 0);
8728     }
8729 
8730     g_free(argp);
8731     g_free(envp);
8732     return ret;
8733 }
8734 
8735 #define TIMER_MAGIC 0x0caf0000
8736 #define TIMER_MAGIC_MASK 0xffff0000
8737 
8738 /* Convert QEMU provided timer ID back to internal 16bit index format */
8739 static target_timer_t get_timer_id(abi_long arg)
8740 {
8741     target_timer_t timerid = arg;
8742 
8743     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8744         return -TARGET_EINVAL;
8745     }
8746 
8747     timerid &= 0xffff;
8748 
8749     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8750         return -TARGET_EINVAL;
8751     }
8752 
8753     return timerid;
8754 }
8755 
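/*
 * Convert a guest CPU affinity mask to host layout: walk the guest
 * abi_ulong words bit by bit (__get_user handles the byte order) and
 * set the corresponding bits in the host unsigned long array.  The
 * reverse conversion below works the same way.
 */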
8756 static int target_to_host_cpu_mask(unsigned long *host_mask,
8757                                    size_t host_size,
8758                                    abi_ulong target_addr,
8759                                    size_t target_size)
8760 {
8761     unsigned target_bits = sizeof(abi_ulong) * 8;
8762     unsigned host_bits = sizeof(*host_mask) * 8;
8763     abi_ulong *target_mask;
8764     unsigned i, j;
8765 
8766     assert(host_size >= target_size);
8767 
8768     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8769     if (!target_mask) {
8770         return -TARGET_EFAULT;
8771     }
8772     memset(host_mask, 0, host_size);
8773 
8774     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8775         unsigned bit = i * target_bits;
8776         abi_ulong val;
8777 
8778         __get_user(val, &target_mask[i]);
8779         for (j = 0; j < target_bits; j++, bit++) {
8780             if (val & (1UL << j)) {
8781                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8782             }
8783         }
8784     }
8785 
8786     unlock_user(target_mask, target_addr, 0);
8787     return 0;
8788 }
8789 
8790 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8791                                    size_t host_size,
8792                                    abi_ulong target_addr,
8793                                    size_t target_size)
8794 {
8795     unsigned target_bits = sizeof(abi_ulong) * 8;
8796     unsigned host_bits = sizeof(*host_mask) * 8;
8797     abi_ulong *target_mask;
8798     unsigned i, j;
8799 
8800     assert(host_size >= target_size);
8801 
8802     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8803     if (!target_mask) {
8804         return -TARGET_EFAULT;
8805     }
8806 
8807     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8808         unsigned bit = i * target_bits;
8809         abi_ulong val = 0;
8810 
8811         for (j = 0; j < target_bits; j++, bit++) {
8812             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8813                 val |= 1UL << j;
8814             }
8815         }
8816         __put_user(val, &target_mask[i]);
8817     }
8818 
8819     unlock_user(target_mask, target_addr, target_size);
8820     return 0;
8821 }
8822 
8823 #ifdef TARGET_NR_getdents
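/*
 * getdents: read host dirents into a bounce buffer and repack them one
 * record at a time into the guest's dirent layout.  If a repacked
 * record no longer fits in the guest buffer, seek back to the first
 * unreturned entry and return what we have.
 */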
8824 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8825 {
8826     g_autofree void *hdirp = NULL;
8827     void *tdirp;
8828     int hlen, hoff, toff;
8829     int hreclen, treclen;
8830     off64_t prev_diroff = 0;
8831 
8832     hdirp = g_try_malloc(count);
8833     if (!hdirp) {
8834         return -TARGET_ENOMEM;
8835     }
8836 
8837 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8838     hlen = sys_getdents(dirfd, hdirp, count);
8839 #else
8840     hlen = sys_getdents64(dirfd, hdirp, count);
8841 #endif
8842 
8843     hlen = get_errno(hlen);
8844     if (is_error(hlen)) {
8845         return hlen;
8846     }
8847 
8848     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8849     if (!tdirp) {
8850         return -TARGET_EFAULT;
8851     }
8852 
8853     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8854 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8855         struct linux_dirent *hde = hdirp + hoff;
8856 #else
8857         struct linux_dirent64 *hde = hdirp + hoff;
8858 #endif
8859         struct target_dirent *tde = tdirp + toff;
8860         int namelen;
8861         uint8_t type;
8862 
8863         namelen = strlen(hde->d_name);
8864         hreclen = hde->d_reclen;
8865         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8866         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8867 
8868         if (toff + treclen > count) {
8869             /*
8870              * If the host struct is smaller than the target struct, or
8871              * requires less alignment and thus packs into less space,
8872              * then the host can return more entries than we can pass
8873              * on to the guest.
8874              */
8875             if (toff == 0) {
8876                 toff = -TARGET_EINVAL; /* result buffer is too small */
8877                 break;
8878             }
8879             /*
8880              * Return what we have, resetting the file pointer to the
8881              * location of the first record not returned.
8882              */
8883             lseek64(dirfd, prev_diroff, SEEK_SET);
8884             break;
8885         }
8886 
8887         prev_diroff = hde->d_off;
8888         tde->d_ino = tswapal(hde->d_ino);
8889         tde->d_off = tswapal(hde->d_off);
8890         tde->d_reclen = tswap16(treclen);
8891         memcpy(tde->d_name, hde->d_name, namelen + 1);
8892 
8893         /*
8894          * The getdents type is in what was formerly a padding byte at the
8895          * end of the structure.
8896          */
8897 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8898         type = *((uint8_t *)hde + hreclen - 1);
8899 #else
8900         type = hde->d_type;
8901 #endif
8902         *((uint8_t *)tde + treclen - 1) = type;
8903     }
8904 
8905     unlock_user(tdirp, arg2, toff);
8906     return toff;
8907 }
8908 #endif /* TARGET_NR_getdents */
8909 
8910 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8911 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8912 {
8913     g_autofree void *hdirp = NULL;
8914     void *tdirp;
8915     int hlen, hoff, toff;
8916     int hreclen, treclen;
8917     off64_t prev_diroff = 0;
8918 
8919     hdirp = g_try_malloc(count);
8920     if (!hdirp) {
8921         return -TARGET_ENOMEM;
8922     }
8923 
8924     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8925     if (is_error(hlen)) {
8926         return hlen;
8927     }
8928 
8929     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8930     if (!tdirp) {
8931         return -TARGET_EFAULT;
8932     }
8933 
8934     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8935         struct linux_dirent64 *hde = hdirp + hoff;
8936         struct target_dirent64 *tde = tdirp + toff;
8937         int namelen;
8938 
8939         namelen = strlen(hde->d_name) + 1;
8940         hreclen = hde->d_reclen;
8941         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8942         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8943 
8944         if (toff + treclen > count) {
8945             /*
8946              * If the host struct is smaller than the target struct, or
8947              * requires less alignment and thus packs into less space,
8948              * then the host can return more entries than we can pass
8949              * on to the guest.
8950              */
8951             if (toff == 0) {
8952                 toff = -TARGET_EINVAL; /* result buffer is too small */
8953                 break;
8954             }
8955             /*
8956              * Return what we have, resetting the file pointer to the
8957              * location of the first record not returned.
8958              */
8959             lseek64(dirfd, prev_diroff, SEEK_SET);
8960             break;
8961         }
8962 
8963         prev_diroff = hde->d_off;
8964         tde->d_ino = tswap64(hde->d_ino);
8965         tde->d_off = tswap64(hde->d_off);
8966         tde->d_reclen = tswap16(treclen);
8967         tde->d_type = hde->d_type;
8968         memcpy(tde->d_name, hde->d_name, namelen);
8969     }
8970 
8971     unlock_user(tdirp, arg2, toff);
8972     return toff;
8973 }
8974 #endif /* TARGET_NR_getdents64 */
8975 
8976 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8977 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8978 #endif
8979 
8980 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
8981 #define __NR_sys_open_tree __NR_open_tree
8982 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
8983           unsigned int, __flags)
8984 #endif
8985 
8986 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
8987 #define __NR_sys_move_mount __NR_move_mount
8988 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
8989            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
8990 #endif
8991 
8992 /* This is an internal helper for do_syscall so that there is a single
8993  * return point, at which actions such as logging of syscall results
8994  * can be performed.
8995  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8996  */
8997 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8998                             abi_long arg2, abi_long arg3, abi_long arg4,
8999                             abi_long arg5, abi_long arg6, abi_long arg7,
9000                             abi_long arg8)
9001 {
9002     CPUState *cpu = env_cpu(cpu_env);
9003     abi_long ret;
9004 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9005     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9006     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9007     || defined(TARGET_NR_statx)
9008     struct stat st;
9009 #endif
9010 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9011     || defined(TARGET_NR_fstatfs)
9012     struct statfs stfs;
9013 #endif
9014     void *p;
9015 
9016     switch(num) {
9017     case TARGET_NR_exit:
9018         /* In old applications this may be used to implement _exit(2).
9019            However in threaded applications it is used for thread termination,
9020            and _exit_group is used for application termination.
9021            Do thread termination if we have more than one thread.  */
9022 
9023         if (block_signals()) {
9024             return -QEMU_ERESTARTSYS;
9025         }
9026 
9027         pthread_mutex_lock(&clone_lock);
9028 
9029         if (CPU_NEXT(first_cpu)) {
9030             TaskState *ts = cpu->opaque;
9031 
9032             if (ts->child_tidptr) {
9033                 put_user_u32(0, ts->child_tidptr);
9034                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9035                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9036             }
9037 
9038             object_unparent(OBJECT(cpu));
9039             object_unref(OBJECT(cpu));
9040             /*
9041              * At this point the CPU should be unrealized and removed
9042              * from cpu lists. We can clean-up the rest of the thread
9043              * data without the lock held.
9044              */
9045 
9046             pthread_mutex_unlock(&clone_lock);
9047 
9048             thread_cpu = NULL;
9049             g_free(ts);
9050             rcu_unregister_thread();
9051             pthread_exit(NULL);
9052         }
9053 
9054         pthread_mutex_unlock(&clone_lock);
9055         preexit_cleanup(cpu_env, arg1);
9056         _exit(arg1);
9057         return 0; /* avoid warning */
9058     case TARGET_NR_read:
9059         if (arg2 == 0 && arg3 == 0) {
9060             return get_errno(safe_read(arg1, 0, 0));
9061         } else {
9062             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9063                 return -TARGET_EFAULT;
9064             ret = get_errno(safe_read(arg1, p, arg3));
9065             if (ret >= 0 &&
9066                 fd_trans_host_to_target_data(arg1)) {
9067                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9068             }
9069             unlock_user(p, arg2, ret);
9070         }
9071         return ret;
9072     case TARGET_NR_write:
9073         if (arg2 == 0 && arg3 == 0) {
9074             return get_errno(safe_write(arg1, 0, 0));
9075         }
9076         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9077             return -TARGET_EFAULT;
9078         if (fd_trans_target_to_host_data(arg1)) {
9079             void *copy = g_malloc(arg3);
9080             memcpy(copy, p, arg3);
9081             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9082             if (ret >= 0) {
9083                 ret = get_errno(safe_write(arg1, copy, ret));
9084             }
9085             g_free(copy);
9086         } else {
9087             ret = get_errno(safe_write(arg1, p, arg3));
9088         }
9089         unlock_user(p, arg2, 0);
9090         return ret;
9091 
9092 #ifdef TARGET_NR_open
9093     case TARGET_NR_open:
9094         if (!(p = lock_user_string(arg1)))
9095             return -TARGET_EFAULT;
9096         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9097                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9098                                   arg3, true));
9099         fd_trans_unregister(ret);
9100         unlock_user(p, arg1, 0);
9101         return ret;
9102 #endif
9103     case TARGET_NR_openat:
9104         if (!(p = lock_user_string(arg2)))
9105             return -TARGET_EFAULT;
9106         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9107                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9108                                   arg4, true));
9109         fd_trans_unregister(ret);
9110         unlock_user(p, arg2, 0);
9111         return ret;
9112 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9113     case TARGET_NR_name_to_handle_at:
9114         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9115         return ret;
9116 #endif
9117 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9118     case TARGET_NR_open_by_handle_at:
9119         ret = do_open_by_handle_at(arg1, arg2, arg3);
9120         fd_trans_unregister(ret);
9121         return ret;
9122 #endif
9123 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9124     case TARGET_NR_pidfd_open:
9125         return get_errno(pidfd_open(arg1, arg2));
9126 #endif
9127 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9128     case TARGET_NR_pidfd_send_signal:
9129         {
9130             siginfo_t uinfo, *puinfo;
9131 
9132             if (arg3) {
9133                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9134                 if (!p) {
9135                     return -TARGET_EFAULT;
9136                  }
9137                  target_to_host_siginfo(&uinfo, p);
9138                  unlock_user(p, arg3, 0);
9139                  puinfo = &uinfo;
9140             } else {
9141                  puinfo = NULL;
9142             }
9143             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9144                                               puinfo, arg4));
9145         }
9146         return ret;
9147 #endif
9148 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9149     case TARGET_NR_pidfd_getfd:
9150         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9151 #endif
9152     case TARGET_NR_close:
9153         fd_trans_unregister(arg1);
9154         return get_errno(close(arg1));
9155 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9156     case TARGET_NR_close_range:
9157         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9158         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9159             abi_long fd, maxfd;
9160             maxfd = MIN(arg2, target_fd_max);
9161             for (fd = arg1; fd < maxfd; fd++) {
9162                 fd_trans_unregister(fd);
9163             }
9164         }
9165         return ret;
9166 #endif
9167 
9168     case TARGET_NR_brk:
9169         return do_brk(arg1);
9170 #ifdef TARGET_NR_fork
9171     case TARGET_NR_fork:
9172         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9173 #endif
9174 #ifdef TARGET_NR_waitpid
9175     case TARGET_NR_waitpid:
9176         {
9177             int status;
9178             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9179             if (!is_error(ret) && arg2 && ret
9180                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9181                 return -TARGET_EFAULT;
9182         }
9183         return ret;
9184 #endif
9185 #ifdef TARGET_NR_waitid
9186     case TARGET_NR_waitid:
9187         {
9188             siginfo_t info;
9189             info.si_pid = 0;
9190             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9191             if (!is_error(ret) && arg3 && info.si_pid != 0) {
9192                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9193                     return -TARGET_EFAULT;
9194                 host_to_target_siginfo(p, &info);
9195                 unlock_user(p, arg3, sizeof(target_siginfo_t));
9196             }
9197         }
9198         return ret;
9199 #endif
9200 #ifdef TARGET_NR_creat /* not on alpha */
9201     case TARGET_NR_creat:
9202         if (!(p = lock_user_string(arg1)))
9203             return -TARGET_EFAULT;
9204         ret = get_errno(creat(p, arg2));
9205         fd_trans_unregister(ret);
9206         unlock_user(p, arg1, 0);
9207         return ret;
9208 #endif
9209 #ifdef TARGET_NR_link
9210     case TARGET_NR_link:
9211         {
9212             void * p2;
9213             p = lock_user_string(arg1);
9214             p2 = lock_user_string(arg2);
9215             if (!p || !p2)
9216                 ret = -TARGET_EFAULT;
9217             else
9218                 ret = get_errno(link(p, p2));
9219             unlock_user(p2, arg2, 0);
9220             unlock_user(p, arg1, 0);
9221         }
9222         return ret;
9223 #endif
9224 #if defined(TARGET_NR_linkat)
9225     case TARGET_NR_linkat:
9226         {
9227             void * p2 = NULL;
9228             if (!arg2 || !arg4)
9229                 return -TARGET_EFAULT;
9230             p  = lock_user_string(arg2);
9231             p2 = lock_user_string(arg4);
9232             if (!p || !p2)
9233                 ret = -TARGET_EFAULT;
9234             else
9235                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9236             unlock_user(p, arg2, 0);
9237             unlock_user(p2, arg4, 0);
9238         }
9239         return ret;
9240 #endif
9241 #ifdef TARGET_NR_unlink
9242     case TARGET_NR_unlink:
9243         if (!(p = lock_user_string(arg1)))
9244             return -TARGET_EFAULT;
9245         ret = get_errno(unlink(p));
9246         unlock_user(p, arg1, 0);
9247         return ret;
9248 #endif
9249 #if defined(TARGET_NR_unlinkat)
9250     case TARGET_NR_unlinkat:
9251         if (!(p = lock_user_string(arg2)))
9252             return -TARGET_EFAULT;
9253         ret = get_errno(unlinkat(arg1, p, arg3));
9254         unlock_user(p, arg2, 0);
9255         return ret;
9256 #endif
9257     case TARGET_NR_execveat:
9258         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
9259     case TARGET_NR_execve:
9260         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
9261     case TARGET_NR_chdir:
9262         if (!(p = lock_user_string(arg1)))
9263             return -TARGET_EFAULT;
9264         ret = get_errno(chdir(p));
9265         unlock_user(p, arg1, 0);
9266         return ret;
9267 #ifdef TARGET_NR_time
9268     case TARGET_NR_time:
9269         {
9270             time_t host_time;
9271             ret = get_errno(time(&host_time));
9272             if (!is_error(ret)
9273                 && arg1
9274                 && put_user_sal(host_time, arg1))
9275                 return -TARGET_EFAULT;
9276         }
9277         return ret;
9278 #endif
9279 #ifdef TARGET_NR_mknod
9280     case TARGET_NR_mknod:
9281         if (!(p = lock_user_string(arg1)))
9282             return -TARGET_EFAULT;
9283         ret = get_errno(mknod(p, arg2, arg3));
9284         unlock_user(p, arg1, 0);
9285         return ret;
9286 #endif
9287 #if defined(TARGET_NR_mknodat)
9288     case TARGET_NR_mknodat:
9289         if (!(p = lock_user_string(arg2)))
9290             return -TARGET_EFAULT;
9291         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9292         unlock_user(p, arg2, 0);
9293         return ret;
9294 #endif
9295 #ifdef TARGET_NR_chmod
9296     case TARGET_NR_chmod:
9297         if (!(p = lock_user_string(arg1)))
9298             return -TARGET_EFAULT;
9299         ret = get_errno(chmod(p, arg2));
9300         unlock_user(p, arg1, 0);
9301         return ret;
9302 #endif
9303 #ifdef TARGET_NR_lseek
9304     case TARGET_NR_lseek:
9305         return get_errno(lseek(arg1, arg2, arg3));
9306 #endif
9307 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9308     /* Alpha specific */
9309     case TARGET_NR_getxpid:
9310         cpu_env->ir[IR_A4] = getppid();
9311         return get_errno(getpid());
9312 #endif
9313 #ifdef TARGET_NR_getpid
9314     case TARGET_NR_getpid:
9315         return get_errno(getpid());
9316 #endif
9317     case TARGET_NR_mount:
9318         {
9319             /* need to look at the data field */
9320             void *p2, *p3;
9321 
9322             if (arg1) {
9323                 p = lock_user_string(arg1);
9324                 if (!p) {
9325                     return -TARGET_EFAULT;
9326                 }
9327             } else {
9328                 p = NULL;
9329             }
9330 
9331             p2 = lock_user_string(arg2);
9332             if (!p2) {
9333                 if (arg1) {
9334                     unlock_user(p, arg1, 0);
9335                 }
9336                 return -TARGET_EFAULT;
9337             }
9338 
9339             if (arg3) {
9340                 p3 = lock_user_string(arg3);
9341                 if (!p3) {
9342                     if (arg1) {
9343                         unlock_user(p, arg1, 0);
9344                     }
9345                     unlock_user(p2, arg2, 0);
9346                     return -TARGET_EFAULT;
9347                 }
9348             } else {
9349                 p3 = NULL;
9350             }
9351 
9352             /* FIXME - arg5 should be locked, but it isn't clear how to
9353              * do that since it's not guaranteed to be a NULL-terminated
9354              * string.
9355              */
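                 /*
                  * Instead, translate the guest pointer with g2h() and hand it
                  * to the host mount() as-is; no access check is done on the
                  * data blob.
                  */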
9356             if (!arg5) {
9357                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9358             } else {
9359                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9360             }
9361             ret = get_errno(ret);
9362 
9363             if (arg1) {
9364                 unlock_user(p, arg1, 0);
9365             }
9366             unlock_user(p2, arg2, 0);
9367             if (arg3) {
9368                 unlock_user(p3, arg3, 0);
9369             }
9370         }
9371         return ret;
9372 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9373 #if defined(TARGET_NR_umount)
9374     case TARGET_NR_umount:
9375 #endif
9376 #if defined(TARGET_NR_oldumount)
9377     case TARGET_NR_oldumount:
9378 #endif
9379         if (!(p = lock_user_string(arg1)))
9380             return -TARGET_EFAULT;
9381         ret = get_errno(umount(p));
9382         unlock_user(p, arg1, 0);
9383         return ret;
9384 #endif
9385 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9386     case TARGET_NR_move_mount:
9387         {
9388             void *p2, *p4;
9389 
9390             if (!arg2 || !arg4) {
9391                 return -TARGET_EFAULT;
9392             }
9393 
9394             p2 = lock_user_string(arg2);
9395             if (!p2) {
9396                 return -TARGET_EFAULT;
9397             }
9398 
9399             p4 = lock_user_string(arg4);
9400             if (!p4) {
9401                 unlock_user(p2, arg2, 0);
9402                 return -TARGET_EFAULT;
9403             }
9404             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9405 
9406             unlock_user(p2, arg2, 0);
9407             unlock_user(p4, arg4, 0);
9408 
9409             return ret;
9410         }
9411 #endif
9412 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9413     case TARGET_NR_open_tree:
9414         {
9415             void *p2;
9416             int host_flags;
9417 
9418             if (!arg2) {
9419                 return -TARGET_EFAULT;
9420             }
9421 
9422             p2 = lock_user_string(arg2);
9423             if (!p2) {
9424                 return -TARGET_EFAULT;
9425             }
9426 
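                 /*
                  * OPEN_TREE_CLOEXEC has the same value as O_CLOEXEC, which
                  * differs between target and host ABIs, so translate that
                  * bit by hand and pass the rest of the flags through.
                  */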
9427             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9428             if (arg3 & TARGET_O_CLOEXEC) {
9429                 host_flags |= O_CLOEXEC;
9430             }
9431 
9432             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9433 
9434             unlock_user(p2, arg2, 0);
9435 
9436             return ret;
9437         }
9438 #endif
9439 #ifdef TARGET_NR_stime /* not on alpha */
9440     case TARGET_NR_stime:
9441         {
9442             struct timespec ts;
9443             ts.tv_nsec = 0;
9444             if (get_user_sal(ts.tv_sec, arg1)) {
9445                 return -TARGET_EFAULT;
9446             }
9447             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9448         }
9449 #endif
9450 #ifdef TARGET_NR_alarm /* not on alpha */
9451     case TARGET_NR_alarm:
9452         return alarm(arg1);
9453 #endif
9454 #ifdef TARGET_NR_pause /* not on alpha */
9455     case TARGET_NR_pause:
9456         if (!block_signals()) {
9457             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9458         }
9459         return -TARGET_EINTR;
9460 #endif
9461 #ifdef TARGET_NR_utime
9462     case TARGET_NR_utime:
9463         {
9464             struct utimbuf tbuf, *host_tbuf;
9465             struct target_utimbuf *target_tbuf;
9466             if (arg2) {
9467                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9468                     return -TARGET_EFAULT;
9469                 tbuf.actime = tswapal(target_tbuf->actime);
9470                 tbuf.modtime = tswapal(target_tbuf->modtime);
9471                 unlock_user_struct(target_tbuf, arg2, 0);
9472                 host_tbuf = &tbuf;
9473             } else {
9474                 host_tbuf = NULL;
9475             }
9476             if (!(p = lock_user_string(arg1)))
9477                 return -TARGET_EFAULT;
9478             ret = get_errno(utime(p, host_tbuf));
9479             unlock_user(p, arg1, 0);
9480         }
9481         return ret;
9482 #endif
9483 #ifdef TARGET_NR_utimes
9484     case TARGET_NR_utimes:
9485         {
9486             struct timeval *tvp, tv[2];
9487             if (arg2) {
9488                 if (copy_from_user_timeval(&tv[0], arg2)
9489                     || copy_from_user_timeval(&tv[1],
9490                                               arg2 + sizeof(struct target_timeval)))
9491                     return -TARGET_EFAULT;
9492                 tvp = tv;
9493             } else {
9494                 tvp = NULL;
9495             }
9496             if (!(p = lock_user_string(arg1)))
9497                 return -TARGET_EFAULT;
9498             ret = get_errno(utimes(p, tvp));
9499             unlock_user(p, arg1, 0);
9500         }
9501         return ret;
9502 #endif
9503 #if defined(TARGET_NR_futimesat)
9504     case TARGET_NR_futimesat:
9505         {
9506             struct timeval *tvp, tv[2];
9507             if (arg3) {
9508                 if (copy_from_user_timeval(&tv[0], arg3)
9509                     || copy_from_user_timeval(&tv[1],
9510                                               arg3 + sizeof(struct target_timeval)))
9511                     return -TARGET_EFAULT;
9512                 tvp = tv;
9513             } else {
9514                 tvp = NULL;
9515             }
9516             if (!(p = lock_user_string(arg2))) {
9517                 return -TARGET_EFAULT;
9518             }
9519             ret = get_errno(futimesat(arg1, path(p), tvp));
9520             unlock_user(p, arg2, 0);
9521         }
9522         return ret;
9523 #endif
9524 #ifdef TARGET_NR_access
9525     case TARGET_NR_access:
9526         if (!(p = lock_user_string(arg1))) {
9527             return -TARGET_EFAULT;
9528         }
9529         ret = get_errno(access(path(p), arg2));
9530         unlock_user(p, arg1, 0);
9531         return ret;
9532 #endif
9533 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9534     case TARGET_NR_faccessat:
9535         if (!(p = lock_user_string(arg2))) {
9536             return -TARGET_EFAULT;
9537         }
9538         ret = get_errno(faccessat(arg1, p, arg3, 0));
9539         unlock_user(p, arg2, 0);
9540         return ret;
9541 #endif
9542 #if defined(TARGET_NR_faccessat2)
9543     case TARGET_NR_faccessat2:
9544         if (!(p = lock_user_string(arg2))) {
9545             return -TARGET_EFAULT;
9546         }
9547         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9548         unlock_user(p, arg2, 0);
9549         return ret;
9550 #endif
9551 #ifdef TARGET_NR_nice /* not on alpha */
9552     case TARGET_NR_nice:
9553         return get_errno(nice(arg1));
9554 #endif
9555     case TARGET_NR_sync:
9556         sync();
9557         return 0;
9558 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9559     case TARGET_NR_syncfs:
9560         return get_errno(syncfs(arg1));
9561 #endif
9562     case TARGET_NR_kill:
9563         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9564 #ifdef TARGET_NR_rename
9565     case TARGET_NR_rename:
9566         {
9567             void *p2;
9568             p = lock_user_string(arg1);
9569             p2 = lock_user_string(arg2);
9570             if (!p || !p2)
9571                 ret = -TARGET_EFAULT;
9572             else
9573                 ret = get_errno(rename(p, p2));
9574             unlock_user(p2, arg2, 0);
9575             unlock_user(p, arg1, 0);
9576         }
9577         return ret;
9578 #endif
9579 #if defined(TARGET_NR_renameat)
9580     case TARGET_NR_renameat:
9581         {
9582             void *p2;
9583             p  = lock_user_string(arg2);
9584             p2 = lock_user_string(arg4);
9585             if (!p || !p2)
9586                 ret = -TARGET_EFAULT;
9587             else
9588                 ret = get_errno(renameat(arg1, p, arg3, p2));
9589             unlock_user(p2, arg4, 0);
9590             unlock_user(p, arg2, 0);
9591         }
9592         return ret;
9593 #endif
9594 #if defined(TARGET_NR_renameat2)
9595     case TARGET_NR_renameat2:
9596         {
9597             void *p2;
9598             p  = lock_user_string(arg2);
9599             p2 = lock_user_string(arg4);
9600             if (!p || !p2) {
9601                 ret = -TARGET_EFAULT;
9602             } else {
9603                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9604             }
9605             unlock_user(p2, arg4, 0);
9606             unlock_user(p, arg2, 0);
9607         }
9608         return ret;
9609 #endif
9610 #ifdef TARGET_NR_mkdir
9611     case TARGET_NR_mkdir:
9612         if (!(p = lock_user_string(arg1)))
9613             return -TARGET_EFAULT;
9614         ret = get_errno(mkdir(p, arg2));
9615         unlock_user(p, arg1, 0);
9616         return ret;
9617 #endif
9618 #if defined(TARGET_NR_mkdirat)
9619     case TARGET_NR_mkdirat:
9620         if (!(p = lock_user_string(arg2)))
9621             return -TARGET_EFAULT;
9622         ret = get_errno(mkdirat(arg1, p, arg3));
9623         unlock_user(p, arg2, 0);
9624         return ret;
9625 #endif
9626 #ifdef TARGET_NR_rmdir
9627     case TARGET_NR_rmdir:
9628         if (!(p = lock_user_string(arg1)))
9629             return -TARGET_EFAULT;
9630         ret = get_errno(rmdir(p));
9631         unlock_user(p, arg1, 0);
9632         return ret;
9633 #endif
9634     case TARGET_NR_dup:
9635         ret = get_errno(dup(arg1));
9636         if (ret >= 0) {
9637             fd_trans_dup(arg1, ret);
9638         }
9639         return ret;
9640 #ifdef TARGET_NR_pipe
9641     case TARGET_NR_pipe:
9642         return do_pipe(cpu_env, arg1, 0, 0);
9643 #endif
9644 #ifdef TARGET_NR_pipe2
9645     case TARGET_NR_pipe2:
9646         return do_pipe(cpu_env, arg1,
9647                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9648 #endif
9649     case TARGET_NR_times:
9650         {
9651             struct target_tms *tmsp;
9652             struct tms tms;
9653             ret = get_errno(times(&tms));
9654             if (arg1) {
9655                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9656                 if (!tmsp)
9657                     return -TARGET_EFAULT;
9658                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9659                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9660                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9661                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9662             }
9663             if (!is_error(ret))
9664                 ret = host_to_target_clock_t(ret);
9665         }
9666         return ret;
9667     case TARGET_NR_acct:
9668         if (arg1 == 0) {
9669             ret = get_errno(acct(NULL));
9670         } else {
9671             if (!(p = lock_user_string(arg1))) {
9672                 return -TARGET_EFAULT;
9673             }
9674             ret = get_errno(acct(path(p)));
9675             unlock_user(p, arg1, 0);
9676         }
9677         return ret;
9678 #ifdef TARGET_NR_umount2
9679     case TARGET_NR_umount2:
9680         if (!(p = lock_user_string(arg1)))
9681             return -TARGET_EFAULT;
9682         ret = get_errno(umount2(p, arg2));
9683         unlock_user(p, arg1, 0);
9684         return ret;
9685 #endif
9686     case TARGET_NR_ioctl:
9687         return do_ioctl(arg1, arg2, arg3);
9688 #ifdef TARGET_NR_fcntl
9689     case TARGET_NR_fcntl:
9690         return do_fcntl(arg1, arg2, arg3);
9691 #endif
9692     case TARGET_NR_setpgid:
9693         return get_errno(setpgid(arg1, arg2));
9694     case TARGET_NR_umask:
9695         return get_errno(umask(arg1));
9696     case TARGET_NR_chroot:
9697         if (!(p = lock_user_string(arg1)))
9698             return -TARGET_EFAULT;
9699         ret = get_errno(chroot(p));
9700         unlock_user(p, arg1, 0);
9701         return ret;
9702 #ifdef TARGET_NR_dup2
9703     case TARGET_NR_dup2:
9704         ret = get_errno(dup2(arg1, arg2));
9705         if (ret >= 0) {
9706             fd_trans_dup(arg1, arg2);
9707         }
9708         return ret;
9709 #endif
9710 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9711     case TARGET_NR_dup3:
9712     {
9713         int host_flags;
9714 
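             /* O_CLOEXEC is the only flag dup3() accepts. */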
9715         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9716             return -EINVAL;
9717         }
9718         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9719         ret = get_errno(dup3(arg1, arg2, host_flags));
9720         if (ret >= 0) {
9721             fd_trans_dup(arg1, arg2);
9722         }
9723         return ret;
9724     }
9725 #endif
9726 #ifdef TARGET_NR_getppid /* not on alpha */
9727     case TARGET_NR_getppid:
9728         return get_errno(getppid());
9729 #endif
9730 #ifdef TARGET_NR_getpgrp
9731     case TARGET_NR_getpgrp:
9732         return get_errno(getpgrp());
9733 #endif
9734     case TARGET_NR_setsid:
9735         return get_errno(setsid());
9736 #ifdef TARGET_NR_sigaction
9737     case TARGET_NR_sigaction:
9738         {
9739 #if defined(TARGET_MIPS)
9740             struct target_sigaction act, oact, *pact, *old_act;
9741 
9742             if (arg2) {
9743                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9744                     return -TARGET_EFAULT;
9745                 act._sa_handler = old_act->_sa_handler;
9746                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9747                 act.sa_flags = old_act->sa_flags;
9748                 unlock_user_struct(old_act, arg2, 0);
9749                 pact = &act;
9750             } else {
9751                 pact = NULL;
9752             }
9753 
9754             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9755 
9756             if (!is_error(ret) && arg3) {
9757                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9758                     return -TARGET_EFAULT;
9759                 old_act->_sa_handler = oact._sa_handler;
9760                 old_act->sa_flags = oact.sa_flags;
9761                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9762                 old_act->sa_mask.sig[1] = 0;
9763                 old_act->sa_mask.sig[2] = 0;
9764                 old_act->sa_mask.sig[3] = 0;
9765                 unlock_user_struct(old_act, arg3, 1);
9766             }
9767 #else
9768             struct target_old_sigaction *old_act;
9769             struct target_sigaction act, oact, *pact;
9770             if (arg2) {
9771                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9772                     return -TARGET_EFAULT;
9773                 act._sa_handler = old_act->_sa_handler;
9774                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9775                 act.sa_flags = old_act->sa_flags;
9776 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9777                 act.sa_restorer = old_act->sa_restorer;
9778 #endif
9779                 unlock_user_struct(old_act, arg2, 0);
9780                 pact = &act;
9781             } else {
9782                 pact = NULL;
9783             }
9784             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9785             if (!is_error(ret) && arg3) {
9786                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9787                     return -TARGET_EFAULT;
9788                 old_act->_sa_handler = oact._sa_handler;
9789                 old_act->sa_mask = oact.sa_mask.sig[0];
9790                 old_act->sa_flags = oact.sa_flags;
9791 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9792                 old_act->sa_restorer = oact.sa_restorer;
9793 #endif
9794                 unlock_user_struct(old_act, arg3, 1);
9795             }
9796 #endif
9797         }
9798         return ret;
9799 #endif
9800     case TARGET_NR_rt_sigaction:
9801         {
9802             /*
9803              * For Alpha and SPARC this is a 5 argument syscall, with
9804              * a 'restorer' parameter which must be copied into the
9805              * sa_restorer field of the sigaction struct.
9806              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9807              * and arg5 is the sigsetsize.
9808              */
9809 #if defined(TARGET_ALPHA)
9810             target_ulong sigsetsize = arg4;
9811             target_ulong restorer = arg5;
9812 #elif defined(TARGET_SPARC)
9813             target_ulong restorer = arg4;
9814             target_ulong sigsetsize = arg5;
9815 #else
9816             target_ulong sigsetsize = arg4;
9817             target_ulong restorer = 0;
9818 #endif
9819             struct target_sigaction *act = NULL;
9820             struct target_sigaction *oact = NULL;
9821 
9822             if (sigsetsize != sizeof(target_sigset_t)) {
9823                 return -TARGET_EINVAL;
9824             }
9825             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9826                 return -TARGET_EFAULT;
9827             }
9828             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9829                 ret = -TARGET_EFAULT;
9830             } else {
9831                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9832                 if (oact) {
9833                     unlock_user_struct(oact, arg3, 1);
9834                 }
9835             }
9836             if (act) {
9837                 unlock_user_struct(act, arg2, 0);
9838             }
9839         }
9840         return ret;
9841 #ifdef TARGET_NR_sgetmask /* not on alpha */
9842     case TARGET_NR_sgetmask:
9843         {
9844             sigset_t cur_set;
9845             abi_ulong target_set;
9846             ret = do_sigprocmask(0, NULL, &cur_set);
9847             if (!ret) {
9848                 host_to_target_old_sigset(&target_set, &cur_set);
9849                 ret = target_set;
9850             }
9851         }
9852         return ret;
9853 #endif
9854 #ifdef TARGET_NR_ssetmask /* not on alpha */
9855     case TARGET_NR_ssetmask:
9856         {
9857             sigset_t set, oset;
9858             abi_ulong target_set = arg1;
9859             target_to_host_old_sigset(&set, &target_set);
9860             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9861             if (!ret) {
9862                 host_to_target_old_sigset(&target_set, &oset);
9863                 ret = target_set;
9864             }
9865         }
9866         return ret;
9867 #endif
9868 #ifdef TARGET_NR_sigprocmask
9869     case TARGET_NR_sigprocmask:
9870         {
9871 #if defined(TARGET_ALPHA)
9872             sigset_t set, oldset;
9873             abi_ulong mask;
9874             int how;
9875 
9876             switch (arg1) {
9877             case TARGET_SIG_BLOCK:
9878                 how = SIG_BLOCK;
9879                 break;
9880             case TARGET_SIG_UNBLOCK:
9881                 how = SIG_UNBLOCK;
9882                 break;
9883             case TARGET_SIG_SETMASK:
9884                 how = SIG_SETMASK;
9885                 break;
9886             default:
9887                 return -TARGET_EINVAL;
9888             }
9889             mask = arg2;
9890             target_to_host_old_sigset(&set, &mask);
9891 
9892             ret = do_sigprocmask(how, &set, &oldset);
9893             if (!is_error(ret)) {
9894                 host_to_target_old_sigset(&mask, &oldset);
9895                 ret = mask;
9896                 cpu_env->ir[IR_V0] = 0; /* force no error */
9897             }
9898 #else
9899             sigset_t set, oldset, *set_ptr;
9900             int how;
9901 
9902             if (arg2) {
9903                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9904                 if (!p) {
9905                     return -TARGET_EFAULT;
9906                 }
9907                 target_to_host_old_sigset(&set, p);
9908                 unlock_user(p, arg2, 0);
9909                 set_ptr = &set;
9910                 switch (arg1) {
9911                 case TARGET_SIG_BLOCK:
9912                     how = SIG_BLOCK;
9913                     break;
9914                 case TARGET_SIG_UNBLOCK:
9915                     how = SIG_UNBLOCK;
9916                     break;
9917                 case TARGET_SIG_SETMASK:
9918                     how = SIG_SETMASK;
9919                     break;
9920                 default:
9921                     return -TARGET_EINVAL;
9922                 }
9923             } else {
9924                 how = 0;
9925                 set_ptr = NULL;
9926             }
9927             ret = do_sigprocmask(how, set_ptr, &oldset);
9928             if (!is_error(ret) && arg3) {
9929                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9930                     return -TARGET_EFAULT;
9931                 host_to_target_old_sigset(p, &oldset);
9932                 unlock_user(p, arg3, sizeof(target_sigset_t));
9933             }
9934 #endif
9935         }
9936         return ret;
9937 #endif
9938     case TARGET_NR_rt_sigprocmask:
9939         {
9940             int how = arg1;
9941             sigset_t set, oldset, *set_ptr;
9942 
9943             if (arg4 != sizeof(target_sigset_t)) {
9944                 return -TARGET_EINVAL;
9945             }
9946 
9947             if (arg2) {
9948                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9949                 if (!p) {
9950                     return -TARGET_EFAULT;
9951                 }
9952                 target_to_host_sigset(&set, p);
9953                 unlock_user(p, arg2, 0);
9954                 set_ptr = &set;
9955                 switch(how) {
9956                 case TARGET_SIG_BLOCK:
9957                     how = SIG_BLOCK;
9958                     break;
9959                 case TARGET_SIG_UNBLOCK:
9960                     how = SIG_UNBLOCK;
9961                     break;
9962                 case TARGET_SIG_SETMASK:
9963                     how = SIG_SETMASK;
9964                     break;
9965                 default:
9966                     return -TARGET_EINVAL;
9967                 }
9968             } else {
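                     /* With no new set supplied, 'how' is ignored by the kernel. */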
9969                 how = 0;
9970                 set_ptr = NULL;
9971             }
9972             ret = do_sigprocmask(how, set_ptr, &oldset);
9973             if (!is_error(ret) && arg3) {
9974                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9975                     return -TARGET_EFAULT;
9976                 host_to_target_sigset(p, &oldset);
9977                 unlock_user(p, arg3, sizeof(target_sigset_t));
9978             }
9979         }
9980         return ret;
9981 #ifdef TARGET_NR_sigpending
9982     case TARGET_NR_sigpending:
9983         {
9984             sigset_t set;
9985             ret = get_errno(sigpending(&set));
9986             if (!is_error(ret)) {
9987                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9988                     return -TARGET_EFAULT;
9989                 host_to_target_old_sigset(p, &set);
9990                 unlock_user(p, arg1, sizeof(target_sigset_t));
9991             }
9992         }
9993         return ret;
9994 #endif
9995     case TARGET_NR_rt_sigpending:
9996         {
9997             sigset_t set;
9998 
9999             /* Yes, this check is >, not != like most. We follow the kernel's
10000              * logic and it does it like this because it implements
10001              * NR_sigpending through the same code path, and in that case
10002              * the old_sigset_t is smaller in size.
10003              */
10004             if (arg2 > sizeof(target_sigset_t)) {
10005                 return -TARGET_EINVAL;
10006             }
10007 
10008             ret = get_errno(sigpending(&set));
10009             if (!is_error(ret)) {
10010                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10011                     return -TARGET_EFAULT;
10012                 host_to_target_sigset(p, &set);
10013                 unlock_user(p, arg1, sizeof(target_sigset_t));
10014             }
10015         }
10016         return ret;
10017 #ifdef TARGET_NR_sigsuspend
10018     case TARGET_NR_sigsuspend:
10019         {
10020             sigset_t *set;
10021 
10022 #if defined(TARGET_ALPHA)
10023             TaskState *ts = cpu->opaque;
10024             /* arg1 holds the mask by value; tswapal() converts it to target byte order so target_to_host_old_sigset() can swap it back */
10025             abi_ulong mask = tswapal(arg1);
10026             set = &ts->sigsuspend_mask;
10027             target_to_host_old_sigset(set, &mask);
10028 #else
10029             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10030             if (ret != 0) {
10031                 return ret;
10032             }
10033 #endif
10034             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10035             finish_sigsuspend_mask(ret);
10036         }
10037         return ret;
10038 #endif
10039     case TARGET_NR_rt_sigsuspend:
10040         {
10041             sigset_t *set;
10042 
10043             ret = process_sigsuspend_mask(&set, arg1, arg2);
10044             if (ret != 0) {
10045                 return ret;
10046             }
10047             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10048             finish_sigsuspend_mask(ret);
10049         }
10050         return ret;
10051 #ifdef TARGET_NR_rt_sigtimedwait
10052     case TARGET_NR_rt_sigtimedwait:
10053         {
10054             sigset_t set;
10055             struct timespec uts, *puts;
10056             siginfo_t uinfo;
10057 
10058             if (arg4 != sizeof(target_sigset_t)) {
10059                 return -TARGET_EINVAL;
10060             }
10061 
10062             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10063                 return -TARGET_EFAULT;
10064             target_to_host_sigset(&set, p);
10065             unlock_user(p, arg1, 0);
10066             if (arg3) {
10067                 puts = &uts;
10068                 if (target_to_host_timespec(puts, arg3)) {
10069                     return -TARGET_EFAULT;
10070                 }
10071             } else {
10072                 puts = NULL;
10073             }
10074             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10075                                                  SIGSET_T_SIZE));
10076             if (!is_error(ret)) {
10077                 if (arg2) {
10078                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10079                                   0);
10080                     if (!p) {
10081                         return -TARGET_EFAULT;
10082                     }
10083                     host_to_target_siginfo(p, &uinfo);
10084                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10085                 }
10086                 ret = host_to_target_signal(ret);
10087             }
10088         }
10089         return ret;
10090 #endif
10091 #ifdef TARGET_NR_rt_sigtimedwait_time64
10092     case TARGET_NR_rt_sigtimedwait_time64:
10093         {
10094             sigset_t set;
10095             struct timespec uts, *puts;
10096             siginfo_t uinfo;
10097 
10098             if (arg4 != sizeof(target_sigset_t)) {
10099                 return -TARGET_EINVAL;
10100             }
10101 
10102             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10103             if (!p) {
10104                 return -TARGET_EFAULT;
10105             }
10106             target_to_host_sigset(&set, p);
10107             unlock_user(p, arg1, 0);
10108             if (arg3) {
10109                 puts = &uts;
10110                 if (target_to_host_timespec64(puts, arg3)) {
10111                     return -TARGET_EFAULT;
10112                 }
10113             } else {
10114                 puts = NULL;
10115             }
10116             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10117                                                  SIGSET_T_SIZE));
10118             if (!is_error(ret)) {
10119                 if (arg2) {
10120                     p = lock_user(VERIFY_WRITE, arg2,
10121                                   sizeof(target_siginfo_t), 0);
10122                     if (!p) {
10123                         return -TARGET_EFAULT;
10124                     }
10125                     host_to_target_siginfo(p, &uinfo);
10126                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10127                 }
10128                 ret = host_to_target_signal(ret);
10129             }
10130         }
10131         return ret;
10132 #endif
10133     case TARGET_NR_rt_sigqueueinfo:
10134         {
10135             siginfo_t uinfo;
10136 
10137             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10138             if (!p) {
10139                 return -TARGET_EFAULT;
10140             }
10141             target_to_host_siginfo(&uinfo, p);
10142             unlock_user(p, arg3, 0);
10143             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10144         }
10145         return ret;
10146     case TARGET_NR_rt_tgsigqueueinfo:
10147         {
10148             siginfo_t uinfo;
10149 
10150             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10151             if (!p) {
10152                 return -TARGET_EFAULT;
10153             }
10154             target_to_host_siginfo(&uinfo, p);
10155             unlock_user(p, arg4, 0);
10156             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10157         }
10158         return ret;
10159 #ifdef TARGET_NR_sigreturn
10160     case TARGET_NR_sigreturn:
10161         if (block_signals()) {
10162             return -QEMU_ERESTARTSYS;
10163         }
10164         return do_sigreturn(cpu_env);
10165 #endif
10166     case TARGET_NR_rt_sigreturn:
10167         if (block_signals()) {
10168             return -QEMU_ERESTARTSYS;
10169         }
10170         return do_rt_sigreturn(cpu_env);
10171     case TARGET_NR_sethostname:
10172         if (!(p = lock_user_string(arg1)))
10173             return -TARGET_EFAULT;
10174         ret = get_errno(sethostname(p, arg2));
10175         unlock_user(p, arg1, 0);
10176         return ret;
10177 #ifdef TARGET_NR_setrlimit
10178     case TARGET_NR_setrlimit:
10179         {
10180             int resource = target_to_host_resource(arg1);
10181             struct target_rlimit *target_rlim;
10182             struct rlimit rlim;
10183             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10184                 return -TARGET_EFAULT;
10185             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10186             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10187             unlock_user_struct(target_rlim, arg2, 0);
10188             /*
10189              * If we just passed through resource limit settings for memory then
10190              * they would also apply to QEMU's own allocations, and QEMU will
10191              * crash or hang or die if its allocations fail. Ideally we would
10192              * track the guest allocations in QEMU and apply the limits ourselves.
10193              * For now, just tell the guest the call succeeded but don't actually
10194              * limit anything.
10195              */
10196             if (resource != RLIMIT_AS &&
10197                 resource != RLIMIT_DATA &&
10198                 resource != RLIMIT_STACK) {
10199                 return get_errno(setrlimit(resource, &rlim));
10200             } else {
10201                 return 0;
10202             }
10203         }
10204 #endif
10205 #ifdef TARGET_NR_getrlimit
10206     case TARGET_NR_getrlimit:
10207         {
10208             int resource = target_to_host_resource(arg1);
10209             struct target_rlimit *target_rlim;
10210             struct rlimit rlim;
10211 
10212             ret = get_errno(getrlimit(resource, &rlim));
10213             if (!is_error(ret)) {
10214                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10215                     return -TARGET_EFAULT;
10216                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10217                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10218                 unlock_user_struct(target_rlim, arg2, 1);
10219             }
10220         }
10221         return ret;
10222 #endif
10223     case TARGET_NR_getrusage:
10224         {
10225             struct rusage rusage;
10226             ret = get_errno(getrusage(arg1, &rusage));
10227             if (!is_error(ret)) {
10228                 ret = host_to_target_rusage(arg2, &rusage);
10229             }
10230         }
10231         return ret;
10232 #if defined(TARGET_NR_gettimeofday)
10233     case TARGET_NR_gettimeofday:
10234         {
10235             struct timeval tv;
10236             struct timezone tz;
10237 
10238             ret = get_errno(gettimeofday(&tv, &tz));
10239             if (!is_error(ret)) {
10240                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10241                     return -TARGET_EFAULT;
10242                 }
10243                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10244                     return -TARGET_EFAULT;
10245                 }
10246             }
10247         }
10248         return ret;
10249 #endif
10250 #if defined(TARGET_NR_settimeofday)
10251     case TARGET_NR_settimeofday:
10252         {
10253             struct timeval tv, *ptv = NULL;
10254             struct timezone tz, *ptz = NULL;
10255 
10256             if (arg1) {
10257                 if (copy_from_user_timeval(&tv, arg1)) {
10258                     return -TARGET_EFAULT;
10259                 }
10260                 ptv = &tv;
10261             }
10262 
10263             if (arg2) {
10264                 if (copy_from_user_timezone(&tz, arg2)) {
10265                     return -TARGET_EFAULT;
10266                 }
10267                 ptz = &tz;
10268             }
10269 
10270             return get_errno(settimeofday(ptv, ptz));
10271         }
10272 #endif
10273 #if defined(TARGET_NR_select)
10274     case TARGET_NR_select:
10275 #if defined(TARGET_WANT_NI_OLD_SELECT)
10276         /* Some architectures used to have old_select here,
10277          * but it is now rejected with -TARGET_ENOSYS.
10278          */
10279         ret = -TARGET_ENOSYS;
10280 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10281         ret = do_old_select(arg1);
10282 #else
10283         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10284 #endif
10285         return ret;
10286 #endif
10287 #ifdef TARGET_NR_pselect6
10288     case TARGET_NR_pselect6:
10289         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10290 #endif
10291 #ifdef TARGET_NR_pselect6_time64
10292     case TARGET_NR_pselect6_time64:
10293         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10294 #endif
10295 #ifdef TARGET_NR_symlink
10296     case TARGET_NR_symlink:
10297         {
10298             void *p2;
10299             p = lock_user_string(arg1);
10300             p2 = lock_user_string(arg2);
10301             if (!p || !p2)
10302                 ret = -TARGET_EFAULT;
10303             else
10304                 ret = get_errno(symlink(p, p2));
10305             unlock_user(p2, arg2, 0);
10306             unlock_user(p, arg1, 0);
10307         }
10308         return ret;
10309 #endif
10310 #if defined(TARGET_NR_symlinkat)
10311     case TARGET_NR_symlinkat:
10312         {
10313             void *p2;
10314             p  = lock_user_string(arg1);
10315             p2 = lock_user_string(arg3);
10316             if (!p || !p2)
10317                 ret = -TARGET_EFAULT;
10318             else
10319                 ret = get_errno(symlinkat(p, arg2, p2));
10320             unlock_user(p2, arg3, 0);
10321             unlock_user(p, arg1, 0);
10322         }
10323         return ret;
10324 #endif
10325 #ifdef TARGET_NR_readlink
10326     case TARGET_NR_readlink:
10327         {
10328             void *p2;
10329             p = lock_user_string(arg1);
10330             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10331             ret = get_errno(do_guest_readlink(p, p2, arg3));
10332             unlock_user(p2, arg2, ret);
10333             unlock_user(p, arg1, 0);
10334         }
10335         return ret;
10336 #endif
10337 #if defined(TARGET_NR_readlinkat)
10338     case TARGET_NR_readlinkat:
10339         {
10340             void *p2;
10341             p  = lock_user_string(arg2);
10342             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10343             if (!p || !p2) {
10344                 ret = -TARGET_EFAULT;
10345             } else if (!arg4) {
10346                 /* Reject a zero-sized buffer up front so the magic exe check below need not handle it. */
10347                 ret = -TARGET_EINVAL;
10348             } else if (is_proc_myself((const char *)p, "exe")) {
10349                 /*
10350                  * Don't worry about sign mismatch as earlier mapping
10351                  * logic would have thrown a bad address error.
10352                  */
10353                 ret = MIN(strlen(exec_path), arg4);
10354                 /* We cannot NUL terminate the string. */
10355                 memcpy(p2, exec_path, ret);
10356             } else {
10357                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10358             }
10359             unlock_user(p2, arg3, ret);
10360             unlock_user(p, arg2, 0);
10361         }
10362         return ret;
10363 #endif
10364 #ifdef TARGET_NR_swapon
10365     case TARGET_NR_swapon:
10366         if (!(p = lock_user_string(arg1)))
10367             return -TARGET_EFAULT;
10368         ret = get_errno(swapon(p, arg2));
10369         unlock_user(p, arg1, 0);
10370         return ret;
10371 #endif
10372     case TARGET_NR_reboot:
10373         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10374            /* arg4 (the command string) is only used here; it must be ignored in all other cases */
10375            p = lock_user_string(arg4);
10376            if (!p) {
10377                return -TARGET_EFAULT;
10378            }
10379            ret = get_errno(reboot(arg1, arg2, arg3, p));
10380            unlock_user(p, arg4, 0);
10381         } else {
10382            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10383         }
10384         return ret;
10385 #ifdef TARGET_NR_mmap
10386     case TARGET_NR_mmap:
10387 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10388     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10389     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10390     || defined(TARGET_S390X)
10391         {
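                  /*
                   * These targets use the old mmap calling convention: the six
                   * arguments are read from a block of guest memory that arg1
                   * points to.
                   */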
10392             abi_ulong *v;
10393             abi_ulong v1, v2, v3, v4, v5, v6;
10394             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10395                 return -TARGET_EFAULT;
10396             v1 = tswapal(v[0]);
10397             v2 = tswapal(v[1]);
10398             v3 = tswapal(v[2]);
10399             v4 = tswapal(v[3]);
10400             v5 = tswapal(v[4]);
10401             v6 = tswapal(v[5]);
10402             unlock_user(v, arg1, 0);
10403             ret = get_errno(target_mmap(v1, v2, v3,
10404                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10405                                         v5, v6));
10406         }
10407 #else
10408         /* mmap pointers are always untagged */
10409         ret = get_errno(target_mmap(arg1, arg2, arg3,
10410                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10411                                     arg5,
10412                                     arg6));
10413 #endif
10414         return ret;
10415 #endif
10416 #ifdef TARGET_NR_mmap2
10417     case TARGET_NR_mmap2:
10418 #ifndef MMAP_SHIFT
10419 #define MMAP_SHIFT 12
10420 #endif
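              /* The mmap2 file offset is given in units of 1 << MMAP_SHIFT bytes. */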
10421         ret = target_mmap(arg1, arg2, arg3,
10422                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10423                           arg5, arg6 << MMAP_SHIFT);
10424         return get_errno(ret);
10425 #endif
10426     case TARGET_NR_munmap:
10427         arg1 = cpu_untagged_addr(cpu, arg1);
10428         return get_errno(target_munmap(arg1, arg2));
10429     case TARGET_NR_mprotect:
10430         arg1 = cpu_untagged_addr(cpu, arg1);
10431         {
10432             TaskState *ts = cpu->opaque;
10433             /* Special hack to detect libc making the stack executable.  */
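                  /*
                   * If the request covers part of the initial stack with
                   * PROT_GROWSDOWN, drop that flag and extend the range down
                   * to the recorded stack limit ourselves.
                   */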
10434             if ((arg3 & PROT_GROWSDOWN)
10435                 && arg1 >= ts->info->stack_limit
10436                 && arg1 <= ts->info->start_stack) {
10437                 arg3 &= ~PROT_GROWSDOWN;
10438                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10439                 arg1 = ts->info->stack_limit;
10440             }
10441         }
10442         return get_errno(target_mprotect(arg1, arg2, arg3));
10443 #ifdef TARGET_NR_mremap
10444     case TARGET_NR_mremap:
10445         arg1 = cpu_untagged_addr(cpu, arg1);
10446         /* mremap new_addr (arg5) is always untagged */
10447         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10448 #endif
10449         /* ??? msync/mlock/munlock are broken for softmmu.  */
10450 #ifdef TARGET_NR_msync
10451     case TARGET_NR_msync:
10452         return get_errno(msync(g2h(cpu, arg1), arg2,
10453                                target_to_host_msync_arg(arg3)));
10454 #endif
10455 #ifdef TARGET_NR_mlock
10456     case TARGET_NR_mlock:
10457         return get_errno(mlock(g2h(cpu, arg1), arg2));
10458 #endif
10459 #ifdef TARGET_NR_munlock
10460     case TARGET_NR_munlock:
10461         return get_errno(munlock(g2h(cpu, arg1), arg2));
10462 #endif
10463 #ifdef TARGET_NR_mlockall
10464     case TARGET_NR_mlockall:
10465         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10466 #endif
10467 #ifdef TARGET_NR_munlockall
10468     case TARGET_NR_munlockall:
10469         return get_errno(munlockall());
10470 #endif
10471 #ifdef TARGET_NR_truncate
10472     case TARGET_NR_truncate:
10473         if (!(p = lock_user_string(arg1)))
10474             return -TARGET_EFAULT;
10475         ret = get_errno(truncate(p, arg2));
10476         unlock_user(p, arg1, 0);
10477         return ret;
10478 #endif
10479 #ifdef TARGET_NR_ftruncate
10480     case TARGET_NR_ftruncate:
10481         return get_errno(ftruncate(arg1, arg2));
10482 #endif
10483     case TARGET_NR_fchmod:
10484         return get_errno(fchmod(arg1, arg2));
10485 #if defined(TARGET_NR_fchmodat)
10486     case TARGET_NR_fchmodat:
10487         if (!(p = lock_user_string(arg2)))
10488             return -TARGET_EFAULT;
10489         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10490         unlock_user(p, arg2, 0);
10491         return ret;
10492 #endif
10493     case TARGET_NR_getpriority:
10494         /* Note that negative values are valid for getpriority, so we must
10495            differentiate based on errno settings.  */
10496         errno = 0;
10497         ret = getpriority(arg1, arg2);
10498         if (ret == -1 && errno != 0) {
10499             return -host_to_target_errno(errno);
10500         }
10501 #ifdef TARGET_ALPHA
10502         /* Return value is the unbiased priority.  Signal no error.  */
10503         cpu_env->ir[IR_V0] = 0;
10504 #else
10505         /* Return value is a biased priority to avoid negative numbers.  */
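              /* e.g. a nice value of -5 is returned as 25, 0 as 20, 19 as 1. */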
10506         ret = 20 - ret;
10507 #endif
10508         return ret;
10509     case TARGET_NR_setpriority:
10510         return get_errno(setpriority(arg1, arg2, arg3));
10511 #ifdef TARGET_NR_statfs
10512     case TARGET_NR_statfs:
10513         if (!(p = lock_user_string(arg1))) {
10514             return -TARGET_EFAULT;
10515         }
10516         ret = get_errno(statfs(path(p), &stfs));
10517         unlock_user(p, arg1, 0);
10518     convert_statfs:
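              /* fstatfs re-enters here via goto to share the conversion below. */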
10519         if (!is_error(ret)) {
10520             struct target_statfs *target_stfs;
10521 
10522             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10523                 return -TARGET_EFAULT;
10524             __put_user(stfs.f_type, &target_stfs->f_type);
10525             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10526             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10527             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10528             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10529             __put_user(stfs.f_files, &target_stfs->f_files);
10530             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10531             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10532             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10533             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10534             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10535 #ifdef _STATFS_F_FLAGS
10536             __put_user(stfs.f_flags, &target_stfs->f_flags);
10537 #else
10538             __put_user(0, &target_stfs->f_flags);
10539 #endif
10540             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10541             unlock_user_struct(target_stfs, arg2, 1);
10542         }
10543         return ret;
10544 #endif
10545 #ifdef TARGET_NR_fstatfs
10546     case TARGET_NR_fstatfs:
10547         ret = get_errno(fstatfs(arg1, &stfs));
10548         goto convert_statfs;
10549 #endif
10550 #ifdef TARGET_NR_statfs64
10551     case TARGET_NR_statfs64:
10552         if (!(p = lock_user_string(arg1))) {
10553             return -TARGET_EFAULT;
10554         }
10555         ret = get_errno(statfs(path(p), &stfs));
10556         unlock_user(p, arg1, 0);
10557     convert_statfs64:
10558         if (!is_error(ret)) {
10559             struct target_statfs64 *target_stfs;
10560 
10561             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10562                 return -TARGET_EFAULT;
10563             __put_user(stfs.f_type, &target_stfs->f_type);
10564             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10565             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10566             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10567             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10568             __put_user(stfs.f_files, &target_stfs->f_files);
10569             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10570             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10571             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10572             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10573             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10574 #ifdef _STATFS_F_FLAGS
10575             __put_user(stfs.f_flags, &target_stfs->f_flags);
10576 #else
10577             __put_user(0, &target_stfs->f_flags);
10578 #endif
10579             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10580             unlock_user_struct(target_stfs, arg3, 1);
10581         }
10582         return ret;
10583     case TARGET_NR_fstatfs64:
10584         ret = get_errno(fstatfs(arg1, &stfs));
10585         goto convert_statfs64;
10586 #endif
10587 #ifdef TARGET_NR_socketcall
10588     case TARGET_NR_socketcall:
10589         return do_socketcall(arg1, arg2);
10590 #endif
10591 #ifdef TARGET_NR_accept
10592     case TARGET_NR_accept:
10593         return do_accept4(arg1, arg2, arg3, 0);
10594 #endif
10595 #ifdef TARGET_NR_accept4
10596     case TARGET_NR_accept4:
10597         return do_accept4(arg1, arg2, arg3, arg4);
10598 #endif
10599 #ifdef TARGET_NR_bind
10600     case TARGET_NR_bind:
10601         return do_bind(arg1, arg2, arg3);
10602 #endif
10603 #ifdef TARGET_NR_connect
10604     case TARGET_NR_connect:
10605         return do_connect(arg1, arg2, arg3);
10606 #endif
10607 #ifdef TARGET_NR_getpeername
10608     case TARGET_NR_getpeername:
10609         return do_getpeername(arg1, arg2, arg3);
10610 #endif
10611 #ifdef TARGET_NR_getsockname
10612     case TARGET_NR_getsockname:
10613         return do_getsockname(arg1, arg2, arg3);
10614 #endif
10615 #ifdef TARGET_NR_getsockopt
10616     case TARGET_NR_getsockopt:
10617         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10618 #endif
10619 #ifdef TARGET_NR_listen
10620     case TARGET_NR_listen:
10621         return get_errno(listen(arg1, arg2));
10622 #endif
10623 #ifdef TARGET_NR_recv
10624     case TARGET_NR_recv:
10625         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10626 #endif
10627 #ifdef TARGET_NR_recvfrom
10628     case TARGET_NR_recvfrom:
10629         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10630 #endif
10631 #ifdef TARGET_NR_recvmsg
10632     case TARGET_NR_recvmsg:
10633         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10634 #endif
10635 #ifdef TARGET_NR_send
10636     case TARGET_NR_send:
10637         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10638 #endif
10639 #ifdef TARGET_NR_sendmsg
10640     case TARGET_NR_sendmsg:
10641         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10642 #endif
10643 #ifdef TARGET_NR_sendmmsg
10644     case TARGET_NR_sendmmsg:
10645         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10646 #endif
10647 #ifdef TARGET_NR_recvmmsg
10648     case TARGET_NR_recvmmsg:
10649         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10650 #endif
10651 #ifdef TARGET_NR_sendto
10652     case TARGET_NR_sendto:
10653         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10654 #endif
10655 #ifdef TARGET_NR_shutdown
10656     case TARGET_NR_shutdown:
10657         return get_errno(shutdown(arg1, arg2));
10658 #endif
10659 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10660     case TARGET_NR_getrandom:
10661         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10662         if (!p) {
10663             return -TARGET_EFAULT;
10664         }
10665         ret = get_errno(getrandom(p, arg2, arg3));
10666         unlock_user(p, arg1, ret);
10667         return ret;
10668 #endif
10669 #ifdef TARGET_NR_socket
10670     case TARGET_NR_socket:
10671         return do_socket(arg1, arg2, arg3);
10672 #endif
10673 #ifdef TARGET_NR_socketpair
10674     case TARGET_NR_socketpair:
10675         return do_socketpair(arg1, arg2, arg3, arg4);
10676 #endif
10677 #ifdef TARGET_NR_setsockopt
10678     case TARGET_NR_setsockopt:
10679         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10680 #endif
10681 #if defined(TARGET_NR_syslog)
10682     case TARGET_NR_syslog:
10683         {
10684             int len = arg2;
10685 
10686             switch (arg1) {
10687             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10688             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10689             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10690             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10691             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10692             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10693             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10694             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10695                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10696             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10697             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10698             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10699                 {
10700                     if (len < 0) {
10701                         return -TARGET_EINVAL;
10702                     }
10703                     if (len == 0) {
10704                         return 0;
10705                     }
10706                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10707                     if (!p) {
10708                         return -TARGET_EFAULT;
10709                     }
10710                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10711                     unlock_user(p, arg2, arg3);
10712                 }
10713                 return ret;
10714             default:
10715                 return -TARGET_EINVAL;
10716             }
10717         }
10718         break;
10719 #endif
10720     case TARGET_NR_setitimer:
10721         {
10722             struct itimerval value, ovalue, *pvalue;
10723 
10724             if (arg2) {
10725                 pvalue = &value;
10726                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10727                     || copy_from_user_timeval(&pvalue->it_value,
10728                                               arg2 + sizeof(struct target_timeval)))
10729                     return -TARGET_EFAULT;
10730             } else {
10731                 pvalue = NULL;
10732             }
10733             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10734             if (!is_error(ret) && arg3) {
10735                 if (copy_to_user_timeval(arg3,
10736                                          &ovalue.it_interval)
10737                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10738                                             &ovalue.it_value))
10739                     return -TARGET_EFAULT;
10740             }
10741         }
10742         return ret;
10743     case TARGET_NR_getitimer:
10744         {
10745             struct itimerval value;
10746 
10747             ret = get_errno(getitimer(arg1, &value));
10748             if (!is_error(ret) && arg2) {
10749                 if (copy_to_user_timeval(arg2,
10750                                          &value.it_interval)
10751                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10752                                             &value.it_value))
10753                     return -TARGET_EFAULT;
10754             }
10755         }
10756         return ret;
10757 #ifdef TARGET_NR_stat
10758     case TARGET_NR_stat:
10759         if (!(p = lock_user_string(arg1))) {
10760             return -TARGET_EFAULT;
10761         }
10762         ret = get_errno(stat(path(p), &st));
10763         unlock_user(p, arg1, 0);
10764         goto do_stat;
10765 #endif
10766 #ifdef TARGET_NR_lstat
10767     case TARGET_NR_lstat:
10768         if (!(p = lock_user_string(arg1))) {
10769             return -TARGET_EFAULT;
10770         }
10771         ret = get_errno(lstat(path(p), &st));
10772         unlock_user(p, arg1, 0);
10773         goto do_stat;
10774 #endif
10775 #ifdef TARGET_NR_fstat
10776     case TARGET_NR_fstat:
10777         {
10778             ret = get_errno(fstat(arg1, &st));
10779 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10780         do_stat:
10781 #endif
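                  /*
                   * Common tail for stat/lstat/fstat: marshal the host
                   * struct stat into the guest's target_stat layout field
                   * by field.
                   */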
10782             if (!is_error(ret)) {
10783                 struct target_stat *target_st;
10784 
10785                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10786                     return -TARGET_EFAULT;
10787                 memset(target_st, 0, sizeof(*target_st));
10788                 __put_user(st.st_dev, &target_st->st_dev);
10789                 __put_user(st.st_ino, &target_st->st_ino);
10790                 __put_user(st.st_mode, &target_st->st_mode);
10791                 __put_user(st.st_uid, &target_st->st_uid);
10792                 __put_user(st.st_gid, &target_st->st_gid);
10793                 __put_user(st.st_nlink, &target_st->st_nlink);
10794                 __put_user(st.st_rdev, &target_st->st_rdev);
10795                 __put_user(st.st_size, &target_st->st_size);
10796                 __put_user(st.st_blksize, &target_st->st_blksize);
10797                 __put_user(st.st_blocks, &target_st->st_blocks);
10798                 __put_user(st.st_atime, &target_st->target_st_atime);
10799                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10800                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10801 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10802                 __put_user(st.st_atim.tv_nsec,
10803                            &target_st->target_st_atime_nsec);
10804                 __put_user(st.st_mtim.tv_nsec,
10805                            &target_st->target_st_mtime_nsec);
10806                 __put_user(st.st_ctim.tv_nsec,
10807                            &target_st->target_st_ctime_nsec);
10808 #endif
10809                 unlock_user_struct(target_st, arg2, 1);
10810             }
10811         }
10812         return ret;
10813 #endif
10814     case TARGET_NR_vhangup:
10815         return get_errno(vhangup());
10816 #ifdef TARGET_NR_syscall
10817     case TARGET_NR_syscall:
10818         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10819                           arg6, arg7, arg8, 0);
10820 #endif
10821 #if defined(TARGET_NR_wait4)
10822     case TARGET_NR_wait4:
10823         {
10824             int status;
10825             abi_long status_ptr = arg2;
10826             struct rusage rusage, *rusage_ptr;
10827             abi_ulong target_rusage = arg4;
10828             abi_long rusage_err;
10829             if (target_rusage)
10830                 rusage_ptr = &rusage;
10831             else
10832                 rusage_ptr = NULL;
10833             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
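                  /*
                   * Write the exit status back only when a child was actually
                   * reaped (nonzero pid), and the rusage only when the guest
                   * supplied a buffer for it.
                   */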
10834             if (!is_error(ret)) {
10835                 if (status_ptr && ret) {
10836                     status = host_to_target_waitstatus(status);
10837                     if (put_user_s32(status, status_ptr))
10838                         return -TARGET_EFAULT;
10839                 }
10840                 if (target_rusage) {
10841                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10842                     if (rusage_err) {
10843                         ret = rusage_err;
10844                     }
10845                 }
10846             }
10847         }
10848         return ret;
10849 #endif
10850 #ifdef TARGET_NR_swapoff
10851     case TARGET_NR_swapoff:
10852         if (!(p = lock_user_string(arg1)))
10853             return -TARGET_EFAULT;
10854         ret = get_errno(swapoff(p));
10855         unlock_user(p, arg1, 0);
10856         return ret;
10857 #endif
10858     case TARGET_NR_sysinfo:
10859         {
10860             struct target_sysinfo *target_value;
10861             struct sysinfo value;
10862             ret = get_errno(sysinfo(&value));
10863             if (!is_error(ret) && arg1)
10864             {
10865                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10866                     return -TARGET_EFAULT;
10867                 __put_user(value.uptime, &target_value->uptime);
10868                 __put_user(value.loads[0], &target_value->loads[0]);
10869                 __put_user(value.loads[1], &target_value->loads[1]);
10870                 __put_user(value.loads[2], &target_value->loads[2]);
10871                 __put_user(value.totalram, &target_value->totalram);
10872                 __put_user(value.freeram, &target_value->freeram);
10873                 __put_user(value.sharedram, &target_value->sharedram);
10874                 __put_user(value.bufferram, &target_value->bufferram);
10875                 __put_user(value.totalswap, &target_value->totalswap);
10876                 __put_user(value.freeswap, &target_value->freeswap);
10877                 __put_user(value.procs, &target_value->procs);
10878                 __put_user(value.totalhigh, &target_value->totalhigh);
10879                 __put_user(value.freehigh, &target_value->freehigh);
10880                 __put_user(value.mem_unit, &target_value->mem_unit);
10881                 unlock_user_struct(target_value, arg1, 1);
10882             }
10883         }
10884         return ret;
10885 #ifdef TARGET_NR_ipc
10886     case TARGET_NR_ipc:
10887         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10888 #endif
10889 #ifdef TARGET_NR_semget
10890     case TARGET_NR_semget:
10891         return get_errno(semget(arg1, arg2, arg3));
10892 #endif
10893 #ifdef TARGET_NR_semop
10894     case TARGET_NR_semop:
10895         return do_semtimedop(arg1, arg2, arg3, 0, false);
10896 #endif
10897 #ifdef TARGET_NR_semtimedop
10898     case TARGET_NR_semtimedop:
10899         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10900 #endif
10901 #ifdef TARGET_NR_semtimedop_time64
10902     case TARGET_NR_semtimedop_time64:
10903         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10904 #endif
10905 #ifdef TARGET_NR_semctl
10906     case TARGET_NR_semctl:
10907         return do_semctl(arg1, arg2, arg3, arg4);
10908 #endif
10909 #ifdef TARGET_NR_msgctl
10910     case TARGET_NR_msgctl:
10911         return do_msgctl(arg1, arg2, arg3);
10912 #endif
10913 #ifdef TARGET_NR_msgget
10914     case TARGET_NR_msgget:
10915         return get_errno(msgget(arg1, arg2));
10916 #endif
10917 #ifdef TARGET_NR_msgrcv
10918     case TARGET_NR_msgrcv:
10919         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10920 #endif
10921 #ifdef TARGET_NR_msgsnd
10922     case TARGET_NR_msgsnd:
10923         return do_msgsnd(arg1, arg2, arg3, arg4);
10924 #endif
10925 #ifdef TARGET_NR_shmget
10926     case TARGET_NR_shmget:
10927         return get_errno(shmget(arg1, arg2, arg3));
10928 #endif
10929 #ifdef TARGET_NR_shmctl
10930     case TARGET_NR_shmctl:
10931         return do_shmctl(arg1, arg2, arg3);
10932 #endif
10933 #ifdef TARGET_NR_shmat
10934     case TARGET_NR_shmat:
10935         return do_shmat(cpu_env, arg1, arg2, arg3);
10936 #endif
10937 #ifdef TARGET_NR_shmdt
10938     case TARGET_NR_shmdt:
10939         return do_shmdt(arg1);
10940 #endif
10941     case TARGET_NR_fsync:
10942         return get_errno(fsync(arg1));
10943     case TARGET_NR_clone:
10944         /* Linux manages to have three different orderings for its
10945          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10946          * match the kernel's CONFIG_CLONE_* settings.
10947          * Microblaze is further special in that it uses a sixth
10948          * implicit argument to clone for the TLS pointer.
10949          */
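              /*
               * A sketch of the resulting mapping, assuming do_fork()'s
               * parameters after cpu_env are (flags, newsp, parent_tidptr,
               * tls, child_tidptr), which is not shown here: the default
               * ordering matches the kernel's clone(flags, newsp,
               * parent_tidptr, child_tidptr, tls); BACKWARDS swaps the
               * child_tidptr and tls arguments, while BACKWARDS2 instead
               * swaps flags and newsp.
               */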
10950 #if defined(TARGET_MICROBLAZE)
10951         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10952 #elif defined(TARGET_CLONE_BACKWARDS)
10953         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10954 #elif defined(TARGET_CLONE_BACKWARDS2)
10955         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10956 #else
10957         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10958 #endif
10959         return ret;
10960 #ifdef __NR_exit_group
10961         /* new thread calls */
10962     case TARGET_NR_exit_group:
10963         preexit_cleanup(cpu_env, arg1);
10964         return get_errno(exit_group(arg1));
10965 #endif
10966     case TARGET_NR_setdomainname:
10967         if (!(p = lock_user_string(arg1)))
10968             return -TARGET_EFAULT;
10969         ret = get_errno(setdomainname(p, arg2));
10970         unlock_user(p, arg1, 0);
10971         return ret;
10972     case TARGET_NR_uname:
10973         /* no need to transcode because we use the linux syscall */
10974         {
10975             struct new_utsname * buf;
10976 
10977             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10978                 return -TARGET_EFAULT;
10979             ret = get_errno(sys_uname(buf));
10980             if (!is_error(ret)) {
10981                 /* Overwrite the native machine name with whatever is being
10982                    emulated. */
10983                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10984                           sizeof(buf->machine));
10985                 /* Allow the user to override the reported release.  */
10986                 if (qemu_uname_release && *qemu_uname_release) {
10987                     g_strlcpy(buf->release, qemu_uname_release,
10988                               sizeof(buf->release));
10989                 }
10990             }
10991             unlock_user_struct(buf, arg1, 1);
10992         }
10993         return ret;
10994 #ifdef TARGET_I386
10995     case TARGET_NR_modify_ldt:
10996         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10997 #if !defined(TARGET_X86_64)
10998     case TARGET_NR_vm86:
10999         return do_vm86(cpu_env, arg1, arg2);
11000 #endif
11001 #endif
11002 #if defined(TARGET_NR_adjtimex)
11003     case TARGET_NR_adjtimex:
11004         {
11005             struct timex host_buf;
11006 
11007             if (target_to_host_timex(&host_buf, arg1) != 0) {
11008                 return -TARGET_EFAULT;
11009             }
11010             ret = get_errno(adjtimex(&host_buf));
11011             if (!is_error(ret)) {
11012                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11013                     return -TARGET_EFAULT;
11014                 }
11015             }
11016         }
11017         return ret;
11018 #endif
11019 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11020     case TARGET_NR_clock_adjtime:
11021         {
11022             struct timex htx, *phtx = &htx;
11023 
11024             if (target_to_host_timex(phtx, arg2) != 0) {
11025                 return -TARGET_EFAULT;
11026             }
11027             ret = get_errno(clock_adjtime(arg1, phtx));
11028             if (!is_error(ret) && phtx) {
11029                 if (host_to_target_timex(arg2, phtx) != 0) {
11030                     return -TARGET_EFAULT;
11031                 }
11032             }
11033         }
11034         return ret;
11035 #endif
11036 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11037     case TARGET_NR_clock_adjtime64:
11038         {
11039             struct timex htx;
11040 
11041             if (target_to_host_timex64(&htx, arg2) != 0) {
11042                 return -TARGET_EFAULT;
11043             }
11044             ret = get_errno(clock_adjtime(arg1, &htx));
11045             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11046                 return -TARGET_EFAULT;
11047             }
11048         }
11049         return ret;
11050 #endif
11051     case TARGET_NR_getpgid:
11052         return get_errno(getpgid(arg1));
11053     case TARGET_NR_fchdir:
11054         return get_errno(fchdir(arg1));
11055     case TARGET_NR_personality:
11056         return get_errno(personality(arg1));
11057 #ifdef TARGET_NR__llseek /* Not on alpha */
11058     case TARGET_NR__llseek:
11059         {
11060             int64_t res;
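                  /*
                   * Hosts without an _llseek syscall (typically 64-bit hosts)
                   * get the 64-bit offset rebuilt from the guest's high/low
                   * halves and a plain lseek() instead.
                   */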
11061 #if !defined(__NR_llseek)
11062             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11063             if (res == -1) {
11064                 ret = get_errno(res);
11065             } else {
11066                 ret = 0;
11067             }
11068 #else
11069             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11070 #endif
11071             if ((ret == 0) && put_user_s64(res, arg4)) {
11072                 return -TARGET_EFAULT;
11073             }
11074         }
11075         return ret;
11076 #endif
11077 #ifdef TARGET_NR_getdents
11078     case TARGET_NR_getdents:
11079         return do_getdents(arg1, arg2, arg3);
11080 #endif /* TARGET_NR_getdents */
11081 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11082     case TARGET_NR_getdents64:
11083         return do_getdents64(arg1, arg2, arg3);
11084 #endif /* TARGET_NR_getdents64 */
11085 #if defined(TARGET_NR__newselect)
11086     case TARGET_NR__newselect:
11087         return do_select(arg1, arg2, arg3, arg4, arg5);
11088 #endif
11089 #ifdef TARGET_NR_poll
11090     case TARGET_NR_poll:
11091         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11092 #endif
11093 #ifdef TARGET_NR_ppoll
11094     case TARGET_NR_ppoll:
11095         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11096 #endif
11097 #ifdef TARGET_NR_ppoll_time64
11098     case TARGET_NR_ppoll_time64:
11099         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11100 #endif
11101     case TARGET_NR_flock:
11102         /* NOTE: the flock constant seems to be the same for every
11103            Linux platform */
11104         return get_errno(safe_flock(arg1, arg2));
11105     case TARGET_NR_readv:
11106         {
11107             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11108             if (vec != NULL) {
11109                 ret = get_errno(safe_readv(arg1, vec, arg3));
11110                 unlock_iovec(vec, arg2, arg3, 1);
11111             } else {
11112                 ret = -host_to_target_errno(errno);
11113             }
11114         }
11115         return ret;
11116     case TARGET_NR_writev:
11117         {
11118             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11119             if (vec != NULL) {
11120                 ret = get_errno(safe_writev(arg1, vec, arg3));
11121                 unlock_iovec(vec, arg2, arg3, 0);
11122             } else {
11123                 ret = -host_to_target_errno(errno);
11124             }
11125         }
11126         return ret;
11127 #if defined(TARGET_NR_preadv)
11128     case TARGET_NR_preadv:
11129         {
11130             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11131             if (vec != NULL) {
11132                 unsigned long low, high;
11133 
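                      /* Recombine the guest's offset halves into the low/high
                         form that safe_preadv() expects on this host. */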
11134                 target_to_host_low_high(arg4, arg5, &low, &high);
11135                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11136                 unlock_iovec(vec, arg2, arg3, 1);
11137             } else {
11138                 ret = -host_to_target_errno(errno);
11139             }
11140         }
11141         return ret;
11142 #endif
11143 #if defined(TARGET_NR_pwritev)
11144     case TARGET_NR_pwritev:
11145         {
11146             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11147             if (vec != NULL) {
11148                 unsigned long low, high;
11149 
11150                 target_to_host_low_high(arg4, arg5, &low, &high);
11151                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11152                 unlock_iovec(vec, arg2, arg3, 0);
11153             } else {
11154                 ret = -host_to_target_errno(errno);
11155             }
11156         }
11157         return ret;
11158 #endif
11159     case TARGET_NR_getsid:
11160         return get_errno(getsid(arg1));
11161 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11162     case TARGET_NR_fdatasync:
11163         return get_errno(fdatasync(arg1));
11164 #endif
11165     case TARGET_NR_sched_getaffinity:
11166         {
11167             unsigned int mask_size;
11168             unsigned long *mask;
11169 
11170             /*
11171              * sched_getaffinity needs multiples of ulong, so need to take
11172              * care of mismatches between target ulong and host ulong sizes.
11173              */
11174             if (arg2 & (sizeof(abi_ulong) - 1)) {
11175                 return -TARGET_EINVAL;
11176             }
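                  /* Round the guest-supplied size up to a whole number of
                     host longs. */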
11177             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11178 
11179             mask = alloca(mask_size);
11180             memset(mask, 0, mask_size);
11181             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11182 
11183             if (!is_error(ret)) {
11184                 if (ret > arg2) {
11185                     /* More data was returned than will fit in the caller's buffer.
11186                      * This only happens if sizeof(abi_long) < sizeof(long)
11187                      * and the caller passed us a buffer holding an odd number
11188                      * of abi_longs. If the host kernel is actually using the
11189                      * extra 4 bytes then fail EINVAL; otherwise we can just
11190                      * ignore them and only copy the interesting part.
11191                      */
11192                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11193                     if (numcpus > arg2 * 8) {
11194                         return -TARGET_EINVAL;
11195                     }
11196                     ret = arg2;
11197                 }
11198 
11199                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11200                     return -TARGET_EFAULT;
11201                 }
11202             }
11203         }
11204         return ret;
11205     case TARGET_NR_sched_setaffinity:
11206         {
11207             unsigned int mask_size;
11208             unsigned long *mask;
11209 
11210             /*
11211              * sched_setaffinity needs multiples of ulong, so need to take
11212              * care of mismatches between target ulong and host ulong sizes.
11213              */
11214             if (arg2 & (sizeof(abi_ulong) - 1)) {
11215                 return -TARGET_EINVAL;
11216             }
11217             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11218             mask = alloca(mask_size);
11219 
11220             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11221             if (ret) {
11222                 return ret;
11223             }
11224 
11225             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11226         }
11227     case TARGET_NR_getcpu:
11228         {
11229             unsigned cpu, node;
11230             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
11231                                        arg2 ? &node : NULL,
11232                                        NULL));
11233             if (is_error(ret)) {
11234                 return ret;
11235             }
11236             if (arg1 && put_user_u32(cpu, arg1)) {
11237                 return -TARGET_EFAULT;
11238             }
11239             if (arg2 && put_user_u32(node, arg2)) {
11240                 return -TARGET_EFAULT;
11241             }
11242         }
11243         return ret;
11244     case TARGET_NR_sched_setparam:
11245         {
11246             struct target_sched_param *target_schp;
11247             struct sched_param schp;
11248 
11249             if (arg2 == 0) {
11250                 return -TARGET_EINVAL;
11251             }
11252             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11253                 return -TARGET_EFAULT;
11254             }
11255             schp.sched_priority = tswap32(target_schp->sched_priority);
11256             unlock_user_struct(target_schp, arg2, 0);
11257             return get_errno(sys_sched_setparam(arg1, &schp));
11258         }
11259     case TARGET_NR_sched_getparam:
11260         {
11261             struct target_sched_param *target_schp;
11262             struct sched_param schp;
11263 
11264             if (arg2 == 0) {
11265                 return -TARGET_EINVAL;
11266             }
11267             ret = get_errno(sys_sched_getparam(arg1, &schp));
11268             if (!is_error(ret)) {
11269                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11270                     return -TARGET_EFAULT;
11271                 }
11272                 target_schp->sched_priority = tswap32(schp.sched_priority);
11273                 unlock_user_struct(target_schp, arg2, 1);
11274             }
11275         }
11276         return ret;
11277     case TARGET_NR_sched_setscheduler:
11278         {
11279             struct target_sched_param *target_schp;
11280             struct sched_param schp;
11281             if (arg3 == 0) {
11282                 return -TARGET_EINVAL;
11283             }
11284             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11285                 return -TARGET_EFAULT;
11286             }
11287             schp.sched_priority = tswap32(target_schp->sched_priority);
11288             unlock_user_struct(target_schp, arg3, 0);
11289             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11290         }
11291     case TARGET_NR_sched_getscheduler:
11292         return get_errno(sys_sched_getscheduler(arg1));
11293     case TARGET_NR_sched_getattr:
11294         {
11295             struct target_sched_attr *target_scha;
11296             struct sched_attr scha;
11297             if (arg2 == 0) {
11298                 return -TARGET_EINVAL;
11299             }
11300             if (arg3 > sizeof(scha)) {
11301                 arg3 = sizeof(scha);
11302             }
11303             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11304             if (!is_error(ret)) {
11305                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11306                 if (!target_scha) {
11307                     return -TARGET_EFAULT;
11308                 }
11309                 target_scha->size = tswap32(scha.size);
11310                 target_scha->sched_policy = tswap32(scha.sched_policy);
11311                 target_scha->sched_flags = tswap64(scha.sched_flags);
11312                 target_scha->sched_nice = tswap32(scha.sched_nice);
11313                 target_scha->sched_priority = tswap32(scha.sched_priority);
11314                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11315                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11316                 target_scha->sched_period = tswap64(scha.sched_period);
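                      /*
                       * sched_util_{min,max} are only present in newer kernels;
                       * copy them only if the kernel-reported size says they
                       * are there.
                       */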
11317                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11318                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11319                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11320                 }
11321                 unlock_user(target_scha, arg2, arg3);
11322             }
11323             return ret;
11324         }
11325     case TARGET_NR_sched_setattr:
11326         {
11327             struct target_sched_attr *target_scha;
11328             struct sched_attr scha;
11329             uint32_t size;
11330             int zeroed;
11331             if (arg2 == 0) {
11332                 return -TARGET_EINVAL;
11333             }
11334             if (get_user_u32(size, arg2)) {
11335                 return -TARGET_EFAULT;
11336             }
11337             if (!size) {
11338                 size = offsetof(struct target_sched_attr, sched_util_min);
11339             }
11340             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11341                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11342                     return -TARGET_EFAULT;
11343                 }
11344                 return -TARGET_E2BIG;
11345             }
11346 
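                  /*
                   * If the guest passed a struct larger than the layout we
                   * know about, accept it only when every extra byte is zero;
                   * otherwise report E2BIG and write back the size we do
                   * support.
                   */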
11347             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11348             if (zeroed < 0) {
11349                 return zeroed;
11350             } else if (zeroed == 0) {
11351                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11352                     return -TARGET_EFAULT;
11353                 }
11354                 return -TARGET_E2BIG;
11355             }
11356             if (size > sizeof(struct target_sched_attr)) {
11357                 size = sizeof(struct target_sched_attr);
11358             }
11359 
11360             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11361             if (!target_scha) {
11362                 return -TARGET_EFAULT;
11363             }
11364             scha.size = size;
11365             scha.sched_policy = tswap32(target_scha->sched_policy);
11366             scha.sched_flags = tswap64(target_scha->sched_flags);
11367             scha.sched_nice = tswap32(target_scha->sched_nice);
11368             scha.sched_priority = tswap32(target_scha->sched_priority);
11369             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11370             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11371             scha.sched_period = tswap64(target_scha->sched_period);
11372             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11373                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11374                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11375             }
11376             unlock_user(target_scha, arg2, 0);
11377             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11378         }
11379     case TARGET_NR_sched_yield:
11380         return get_errno(sched_yield());
11381     case TARGET_NR_sched_get_priority_max:
11382         return get_errno(sched_get_priority_max(arg1));
11383     case TARGET_NR_sched_get_priority_min:
11384         return get_errno(sched_get_priority_min(arg1));
11385 #ifdef TARGET_NR_sched_rr_get_interval
11386     case TARGET_NR_sched_rr_get_interval:
11387         {
11388             struct timespec ts;
11389             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11390             if (!is_error(ret)) {
11391                 ret = host_to_target_timespec(arg2, &ts);
11392             }
11393         }
11394         return ret;
11395 #endif
11396 #ifdef TARGET_NR_sched_rr_get_interval_time64
11397     case TARGET_NR_sched_rr_get_interval_time64:
11398         {
11399             struct timespec ts;
11400             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11401             if (!is_error(ret)) {
11402                 ret = host_to_target_timespec64(arg2, &ts);
11403             }
11404         }
11405         return ret;
11406 #endif
11407 #if defined(TARGET_NR_nanosleep)
11408     case TARGET_NR_nanosleep:
11409         {
11410             struct timespec req, rem;
11411             target_to_host_timespec(&req, arg1);
11412             ret = get_errno(safe_nanosleep(&req, &rem));
11413             if (is_error(ret) && arg2) {
11414                 host_to_target_timespec(arg2, &rem);
11415             }
11416         }
11417         return ret;
11418 #endif
11419     case TARGET_NR_prctl:
11420         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11421         break;
11422 #ifdef TARGET_NR_arch_prctl
11423     case TARGET_NR_arch_prctl:
11424         return do_arch_prctl(cpu_env, arg1, arg2);
11425 #endif
11426 #ifdef TARGET_NR_pread64
11427     case TARGET_NR_pread64:
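              /*
               * Targets that pass 64-bit values in aligned register pairs
               * insert a padding argument, so the offset halves start one
               * argument later.
               */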
11428         if (regpairs_aligned(cpu_env, num)) {
11429             arg4 = arg5;
11430             arg5 = arg6;
11431         }
11432         if (arg2 == 0 && arg3 == 0) {
11433             /* Special-case NULL buffer and zero length, which should succeed */
11434             p = 0;
11435         } else {
11436             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11437             if (!p) {
11438                 return -TARGET_EFAULT;
11439             }
11440         }
11441         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11442         unlock_user(p, arg2, ret);
11443         return ret;
11444     case TARGET_NR_pwrite64:
11445         if (regpairs_aligned(cpu_env, num)) {
11446             arg4 = arg5;
11447             arg5 = arg6;
11448         }
11449         if (arg2 == 0 && arg3 == 0) {
11450             /* Special-case NULL buffer and zero length, which should succeed */
11451             p = 0;
11452         } else {
11453             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11454             if (!p) {
11455                 return -TARGET_EFAULT;
11456             }
11457         }
11458         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11459         unlock_user(p, arg2, 0);
11460         return ret;
11461 #endif
11462     case TARGET_NR_getcwd:
11463         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11464             return -TARGET_EFAULT;
11465         ret = get_errno(sys_getcwd1(p, arg2));
11466         unlock_user(p, arg1, ret);
11467         return ret;
11468     case TARGET_NR_capget:
11469     case TARGET_NR_capset:
11470     {
11471         struct target_user_cap_header *target_header;
11472         struct target_user_cap_data *target_data = NULL;
11473         struct __user_cap_header_struct header;
11474         struct __user_cap_data_struct data[2];
11475         struct __user_cap_data_struct *dataptr = NULL;
11476         int i, target_datalen;
11477         int data_items = 1;
11478 
11479         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11480             return -TARGET_EFAULT;
11481         }
11482         header.version = tswap32(target_header->version);
11483         header.pid = tswap32(target_header->pid);
11484 
11485         if (header.version != _LINUX_CAPABILITY_VERSION) {
11486             /* Version 2 and up takes pointer to two user_data structs */
11487             data_items = 2;
11488         }
11489 
11490         target_datalen = sizeof(*target_data) * data_items;
11491 
11492         if (arg2) {
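              /*
               * For capget the data array is written back to the guest, for
               * capset it is read from the guest; lock it in the matching
               * direction.
               */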
11493             if (num == TARGET_NR_capget) {
11494                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11495             } else {
11496                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11497             }
11498             if (!target_data) {
11499                 unlock_user_struct(target_header, arg1, 0);
11500                 return -TARGET_EFAULT;
11501             }
11502 
11503             if (num == TARGET_NR_capset) {
11504                 for (i = 0; i < data_items; i++) {
11505                     data[i].effective = tswap32(target_data[i].effective);
11506                     data[i].permitted = tswap32(target_data[i].permitted);
11507                     data[i].inheritable = tswap32(target_data[i].inheritable);
11508                 }
11509             }
11510 
11511             dataptr = data;
11512         }
11513 
11514         if (num == TARGET_NR_capget) {
11515             ret = get_errno(capget(&header, dataptr));
11516         } else {
11517             ret = get_errno(capset(&header, dataptr));
11518         }
11519 
11520         /* The kernel always updates version for both capget and capset */
11521         target_header->version = tswap32(header.version);
11522         unlock_user_struct(target_header, arg1, 1);
11523 
11524         if (arg2) {
11525             if (num == TARGET_NR_capget) {
11526                 for (i = 0; i < data_items; i++) {
11527                     target_data[i].effective = tswap32(data[i].effective);
11528                     target_data[i].permitted = tswap32(data[i].permitted);
11529                     target_data[i].inheritable = tswap32(data[i].inheritable);
11530                 }
11531                 unlock_user(target_data, arg2, target_datalen);
11532             } else {
11533                 unlock_user(target_data, arg2, 0);
11534             }
11535         }
11536         return ret;
11537     }
11538     case TARGET_NR_sigaltstack:
11539         return do_sigaltstack(arg1, arg2, cpu_env);
11540 
11541 #ifdef CONFIG_SENDFILE
11542 #ifdef TARGET_NR_sendfile
11543     case TARGET_NR_sendfile:
11544     {
11545         off_t *offp = NULL;
11546         off_t off;
11547         if (arg3) {
11548             ret = get_user_sal(off, arg3);
11549             if (is_error(ret)) {
11550                 return ret;
11551             }
11552             offp = &off;
11553         }
11554         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11555         if (!is_error(ret) && arg3) {
11556             abi_long ret2 = put_user_sal(off, arg3);
11557             if (is_error(ret2)) {
11558                 ret = ret2;
11559             }
11560         }
11561         return ret;
11562     }
11563 #endif
11564 #ifdef TARGET_NR_sendfile64
11565     case TARGET_NR_sendfile64:
11566     {
11567         off_t *offp = NULL;
11568         off_t off;
11569         if (arg3) {
11570             ret = get_user_s64(off, arg3);
11571             if (is_error(ret)) {
11572                 return ret;
11573             }
11574             offp = &off;
11575         }
11576         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11577         if (!is_error(ret) && arg3) {
11578             abi_long ret2 = put_user_s64(off, arg3);
11579             if (is_error(ret2)) {
11580                 ret = ret2;
11581             }
11582         }
11583         return ret;
11584     }
11585 #endif
11586 #endif
11587 #ifdef TARGET_NR_vfork
11588     case TARGET_NR_vfork:
11589         return get_errno(do_fork(cpu_env,
11590                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11591                          0, 0, 0, 0));
11592 #endif
11593 #ifdef TARGET_NR_ugetrlimit
11594     case TARGET_NR_ugetrlimit:
11595     {
11596         struct rlimit rlim;
11597         int resource = target_to_host_resource(arg1);
11598         ret = get_errno(getrlimit(resource, &rlim));
11599         if (!is_error(ret)) {
11600             struct target_rlimit *target_rlim;
11601             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11602                 return -TARGET_EFAULT;
11603             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11604             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11605             unlock_user_struct(target_rlim, arg2, 1);
11606         }
11607         return ret;
11608     }
11609 #endif
11610 #ifdef TARGET_NR_truncate64
11611     case TARGET_NR_truncate64:
11612         if (!(p = lock_user_string(arg1)))
11613             return -TARGET_EFAULT;
11614         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11615         unlock_user(p, arg1, 0);
11616         return ret;
11617 #endif
11618 #ifdef TARGET_NR_ftruncate64
11619     case TARGET_NR_ftruncate64:
11620         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11621 #endif
11622 #ifdef TARGET_NR_stat64
11623     case TARGET_NR_stat64:
11624         if (!(p = lock_user_string(arg1))) {
11625             return -TARGET_EFAULT;
11626         }
11627         ret = get_errno(stat(path(p), &st));
11628         unlock_user(p, arg1, 0);
11629         if (!is_error(ret))
11630             ret = host_to_target_stat64(cpu_env, arg2, &st);
11631         return ret;
11632 #endif
11633 #ifdef TARGET_NR_lstat64
11634     case TARGET_NR_lstat64:
11635         if (!(p = lock_user_string(arg1))) {
11636             return -TARGET_EFAULT;
11637         }
11638         ret = get_errno(lstat(path(p), &st));
11639         unlock_user(p, arg1, 0);
11640         if (!is_error(ret))
11641             ret = host_to_target_stat64(cpu_env, arg2, &st);
11642         return ret;
11643 #endif
11644 #ifdef TARGET_NR_fstat64
11645     case TARGET_NR_fstat64:
11646         ret = get_errno(fstat(arg1, &st));
11647         if (!is_error(ret))
11648             ret = host_to_target_stat64(cpu_env, arg2, &st);
11649         return ret;
11650 #endif
11651 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11652 #ifdef TARGET_NR_fstatat64
11653     case TARGET_NR_fstatat64:
11654 #endif
11655 #ifdef TARGET_NR_newfstatat
11656     case TARGET_NR_newfstatat:
11657 #endif
11658         if (!(p = lock_user_string(arg2))) {
11659             return -TARGET_EFAULT;
11660         }
11661         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11662         unlock_user(p, arg2, 0);
11663         if (!is_error(ret))
11664             ret = host_to_target_stat64(cpu_env, arg3, &st);
11665         return ret;
11666 #endif
11667 #if defined(TARGET_NR_statx)
11668     case TARGET_NR_statx:
11669         {
11670             struct target_statx *target_stx;
11671             int dirfd = arg1;
11672             int flags = arg3;
11673 
11674             p = lock_user_string(arg2);
11675             if (p == NULL) {
11676                 return -TARGET_EFAULT;
11677             }
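                  /*
                   * Prefer the host statx() when it is available; if it is
                   * missing or reports ENOSYS, fall back to fstatat() and
                   * fill in the statx fields that can be derived from
                   * struct stat.
                   */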
11678 #if defined(__NR_statx)
11679             {
11680                 /*
11681                  * It is assumed that struct statx is architecture independent.
11682                  */
11683                 struct target_statx host_stx;
11684                 int mask = arg4;
11685 
11686                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11687                 if (!is_error(ret)) {
11688                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11689                         unlock_user(p, arg2, 0);
11690                         return -TARGET_EFAULT;
11691                     }
11692                 }
11693 
11694                 if (ret != -TARGET_ENOSYS) {
11695                     unlock_user(p, arg2, 0);
11696                     return ret;
11697                 }
11698             }
11699 #endif
11700             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11701             unlock_user(p, arg2, 0);
11702 
11703             if (!is_error(ret)) {
11704                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11705                     return -TARGET_EFAULT;
11706                 }
11707                 memset(target_stx, 0, sizeof(*target_stx));
11708                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11709                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11710                 __put_user(st.st_ino, &target_stx->stx_ino);
11711                 __put_user(st.st_mode, &target_stx->stx_mode);
11712                 __put_user(st.st_uid, &target_stx->stx_uid);
11713                 __put_user(st.st_gid, &target_stx->stx_gid);
11714                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11715                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11716                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11717                 __put_user(st.st_size, &target_stx->stx_size);
11718                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11719                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11720                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11721                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11722                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11723                 unlock_user_struct(target_stx, arg5, 1);
11724             }
11725         }
11726         return ret;
11727 #endif
11728 #ifdef TARGET_NR_lchown
11729     case TARGET_NR_lchown:
11730         if (!(p = lock_user_string(arg1)))
11731             return -TARGET_EFAULT;
11732         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11733         unlock_user(p, arg1, 0);
11734         return ret;
11735 #endif
11736 #ifdef TARGET_NR_getuid
11737     case TARGET_NR_getuid:
11738         return get_errno(high2lowuid(getuid()));
11739 #endif
11740 #ifdef TARGET_NR_getgid
11741     case TARGET_NR_getgid:
11742         return get_errno(high2lowgid(getgid()));
11743 #endif
11744 #ifdef TARGET_NR_geteuid
11745     case TARGET_NR_geteuid:
11746         return get_errno(high2lowuid(geteuid()));
11747 #endif
11748 #ifdef TARGET_NR_getegid
11749     case TARGET_NR_getegid:
11750         return get_errno(high2lowgid(getegid()));
11751 #endif
11752     case TARGET_NR_setreuid:
11753         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11754     case TARGET_NR_setregid:
11755         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11756     case TARGET_NR_getgroups:
11757         { /* the same code as for TARGET_NR_getgroups32 */
11758             int gidsetsize = arg1;
11759             target_id *target_grouplist;
11760             g_autofree gid_t *grouplist = NULL;
11761             int i;
11762 
11763             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11764                 return -TARGET_EINVAL;
11765             }
11766             if (gidsetsize > 0) {
11767                 grouplist = g_try_new(gid_t, gidsetsize);
11768                 if (!grouplist) {
11769                     return -TARGET_ENOMEM;
11770                 }
11771             }
11772             ret = get_errno(getgroups(gidsetsize, grouplist));
11773             if (!is_error(ret) && gidsetsize > 0) {
11774                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11775                                              gidsetsize * sizeof(target_id), 0);
11776                 if (!target_grouplist) {
11777                     return -TARGET_EFAULT;
11778                 }
11779                 for (i = 0; i < ret; i++) {
11780                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11781                 }
11782                 unlock_user(target_grouplist, arg2,
11783                             gidsetsize * sizeof(target_id));
11784             }
11785             return ret;
11786         }
11787     case TARGET_NR_setgroups:
11788         { /* the same code as for TARGET_NR_setgroups32 */
11789             int gidsetsize = arg1;
11790             target_id *target_grouplist;
11791             g_autofree gid_t *grouplist = NULL;
11792             int i;
11793 
11794             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11795                 return -TARGET_EINVAL;
11796             }
11797             if (gidsetsize > 0) {
11798                 grouplist = g_try_new(gid_t, gidsetsize);
11799                 if (!grouplist) {
11800                     return -TARGET_ENOMEM;
11801                 }
11802                 target_grouplist = lock_user(VERIFY_READ, arg2,
11803                                              gidsetsize * sizeof(target_id), 1);
11804                 if (!target_grouplist) {
11805                     return -TARGET_EFAULT;
11806                 }
11807                 for (i = 0; i < gidsetsize; i++) {
11808                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11809                 }
11810                 unlock_user(target_grouplist, arg2,
11811                             gidsetsize * sizeof(target_id));
11812             }
11813             return get_errno(setgroups(gidsetsize, grouplist));
11814         }
11815     case TARGET_NR_fchown:
11816         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11817 #if defined(TARGET_NR_fchownat)
11818     case TARGET_NR_fchownat:
11819         if (!(p = lock_user_string(arg2)))
11820             return -TARGET_EFAULT;
11821         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11822                                  low2highgid(arg4), arg5));
11823         unlock_user(p, arg2, 0);
11824         return ret;
11825 #endif
11826 #ifdef TARGET_NR_setresuid
11827     case TARGET_NR_setresuid:
11828         return get_errno(sys_setresuid(low2highuid(arg1),
11829                                        low2highuid(arg2),
11830                                        low2highuid(arg3)));
11831 #endif
11832 #ifdef TARGET_NR_getresuid
11833     case TARGET_NR_getresuid:
11834         {
11835             uid_t ruid, euid, suid;
11836             ret = get_errno(getresuid(&ruid, &euid, &suid));
11837             if (!is_error(ret)) {
11838                 if (put_user_id(high2lowuid(ruid), arg1)
11839                     || put_user_id(high2lowuid(euid), arg2)
11840                     || put_user_id(high2lowuid(suid), arg3))
11841                     return -TARGET_EFAULT;
11842             }
11843         }
11844         return ret;
11845 #endif
11846 #ifdef TARGET_NR_getresgid
11847     case TARGET_NR_setresgid:
11848         return get_errno(sys_setresgid(low2highgid(arg1),
11849                                        low2highgid(arg2),
11850                                        low2highgid(arg3)));
11851 #endif
11852 #ifdef TARGET_NR_getresgid
11853     case TARGET_NR_getresgid:
11854         {
11855             gid_t rgid, egid, sgid;
11856             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11857             if (!is_error(ret)) {
11858                 if (put_user_id(high2lowgid(rgid), arg1)
11859                     || put_user_id(high2lowgid(egid), arg2)
11860                     || put_user_id(high2lowgid(sgid), arg3))
11861                     return -TARGET_EFAULT;
11862             }
11863         }
11864         return ret;
11865 #endif
11866 #ifdef TARGET_NR_chown
11867     case TARGET_NR_chown:
11868         if (!(p = lock_user_string(arg1)))
11869             return -TARGET_EFAULT;
11870         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11871         unlock_user(p, arg1, 0);
11872         return ret;
11873 #endif
11874     case TARGET_NR_setuid:
11875         return get_errno(sys_setuid(low2highuid(arg1)));
11876     case TARGET_NR_setgid:
11877         return get_errno(sys_setgid(low2highgid(arg1)));
11878     case TARGET_NR_setfsuid:
11879         return get_errno(setfsuid(arg1));
11880     case TARGET_NR_setfsgid:
11881         return get_errno(setfsgid(arg1));
11882 
11883 #ifdef TARGET_NR_lchown32
11884     case TARGET_NR_lchown32:
11885         if (!(p = lock_user_string(arg1)))
11886             return -TARGET_EFAULT;
11887         ret = get_errno(lchown(p, arg2, arg3));
11888         unlock_user(p, arg1, 0);
11889         return ret;
11890 #endif
11891 #ifdef TARGET_NR_getuid32
11892     case TARGET_NR_getuid32:
11893         return get_errno(getuid());
11894 #endif
11895 
11896 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11897     /* Alpha specific */
11898     case TARGET_NR_getxuid:
11899         {
11900             uid_t euid;
11901             euid = geteuid();
11902             cpu_env->ir[IR_A4] = euid;
11903         }
11904         return get_errno(getuid());
11905 #endif
11906 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11907     /* Alpha specific */
11908     case TARGET_NR_getxgid:
11909         {
11910             gid_t egid;
11911             egid = getegid();
11912             cpu_env->ir[IR_A4] = egid;
11913         }
11914         return get_errno(getgid());
11915 #endif
11916 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11917     /* Alpha specific */
11918     case TARGET_NR_osf_getsysinfo:
11919         ret = -TARGET_EOPNOTSUPP;
11920         switch (arg1) {
11921           case TARGET_GSI_IEEE_FP_CONTROL:
11922             {
11923                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11924                 uint64_t swcr = cpu_env->swcr;
11925 
11926                 swcr &= ~SWCR_STATUS_MASK;
11927                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11928 
11929                 if (put_user_u64(swcr, arg2))
11930                     return -TARGET_EFAULT;
11931                 ret = 0;
11932             }
11933             break;
11934 
11935           /* case GSI_IEEE_STATE_AT_SIGNAL:
11936              -- Not implemented in linux kernel.
11937              case GSI_UACPROC:
11938              -- Retrieves current unaligned access state; not much used.
11939              case GSI_PROC_TYPE:
11940              -- Retrieves implver information; surely not used.
11941              case GSI_GET_HWRPB:
11942              -- Grabs a copy of the HWRPB; surely not used.
11943           */
11944         }
11945         return ret;
11946 #endif
11947 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11948     /* Alpha specific */
11949     case TARGET_NR_osf_setsysinfo:
11950         ret = -TARGET_EOPNOTSUPP;
11951         switch (arg1) {
11952           case TARGET_SSI_IEEE_FP_CONTROL:
11953             {
11954                 uint64_t swcr, fpcr;
11955 
11956                 if (get_user_u64(swcr, arg2)) {
11957                     return -TARGET_EFAULT;
11958                 }
11959 
11960                 /*
11961                  * The kernel calls swcr_update_status to update the
11962                  * status bits from the fpcr at every point that it
11963                  * could be queried.  Therefore, we store the status
11964                  * bits only in FPCR.
11965                  */
11966                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11967 
11968                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11969                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11970                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11971                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11972                 ret = 0;
11973             }
11974             break;
11975 
11976           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11977             {
11978                 uint64_t exc, fpcr, fex;
11979 
11980                 if (get_user_u64(exc, arg2)) {
11981                     return -TARGET_EFAULT;
11982                 }
11983                 exc &= SWCR_STATUS_MASK;
11984                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11985 
11986                 /* Old exceptions are not signaled.  */
11987                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11988                 fex = exc & ~fex;
11989                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11990                 fex &= (cpu_env)->swcr;
11991 
11992                 /* Update the hardware fpcr.  */
11993                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11994                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11995 
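                      /*
                       * Any newly raised exception that the guest has enabled
                       * trapping on is delivered as a SIGFPE with a matching
                       * si_code.
                       */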
11996                 if (fex) {
11997                     int si_code = TARGET_FPE_FLTUNK;
11998                     target_siginfo_t info;
11999 
12000                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12001                         si_code = TARGET_FPE_FLTUND;
12002                     }
12003                     if (fex & SWCR_TRAP_ENABLE_INE) {
12004                         si_code = TARGET_FPE_FLTRES;
12005                     }
12006                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12007                         si_code = TARGET_FPE_FLTUND;
12008                     }
12009                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12010                         si_code = TARGET_FPE_FLTOVF;
12011                     }
12012                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12013                         si_code = TARGET_FPE_FLTDIV;
12014                     }
12015                     if (fex & SWCR_TRAP_ENABLE_INV) {
12016                         si_code = TARGET_FPE_FLTINV;
12017                     }
12018 
12019                     info.si_signo = SIGFPE;
12020                     info.si_errno = 0;
12021                     info.si_code = si_code;
12022                     info._sifields._sigfault._addr = (cpu_env)->pc;
12023                     queue_signal(cpu_env, info.si_signo,
12024                                  QEMU_SI_FAULT, &info);
12025                 }
12026                 ret = 0;
12027             }
12028             break;
12029 
12030           /* case SSI_NVPAIRS:
12031              -- Used with SSIN_UACPROC to enable unaligned accesses.
12032              case SSI_IEEE_STATE_AT_SIGNAL:
12033              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12034              -- Not implemented in linux kernel
12035           */
12036         }
12037         return ret;
12038 #endif
12039 #ifdef TARGET_NR_osf_sigprocmask
12040     /* Alpha specific.  */
12041     case TARGET_NR_osf_sigprocmask:
12042         {
12043             abi_ulong mask;
12044             int how;
12045             sigset_t set, oldset;
12046 
12047             switch (arg1) {
12048             case TARGET_SIG_BLOCK:
12049                 how = SIG_BLOCK;
12050                 break;
12051             case TARGET_SIG_UNBLOCK:
12052                 how = SIG_UNBLOCK;
12053                 break;
12054             case TARGET_SIG_SETMASK:
12055                 how = SIG_SETMASK;
12056                 break;
12057             default:
12058                 return -TARGET_EINVAL;
12059             }
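                  /*
                   * The OSF/1 flavour returns the previous mask as the
                   * syscall result rather than through a user pointer.
                   */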
12060             mask = arg2;
12061             target_to_host_old_sigset(&set, &mask);
12062             ret = do_sigprocmask(how, &set, &oldset);
12063             if (!ret) {
12064                 host_to_target_old_sigset(&mask, &oldset);
12065                 ret = mask;
12066             }
12067         }
12068         return ret;
12069 #endif
12070 
12071 #ifdef TARGET_NR_getgid32
12072     case TARGET_NR_getgid32:
12073         return get_errno(getgid());
12074 #endif
12075 #ifdef TARGET_NR_geteuid32
12076     case TARGET_NR_geteuid32:
12077         return get_errno(geteuid());
12078 #endif
12079 #ifdef TARGET_NR_getegid32
12080     case TARGET_NR_getegid32:
12081         return get_errno(getegid());
12082 #endif
12083 #ifdef TARGET_NR_setreuid32
12084     case TARGET_NR_setreuid32:
12085         return get_errno(setreuid(arg1, arg2));
12086 #endif
12087 #ifdef TARGET_NR_setregid32
12088     case TARGET_NR_setregid32:
12089         return get_errno(setregid(arg1, arg2));
12090 #endif
12091 #ifdef TARGET_NR_getgroups32
12092     case TARGET_NR_getgroups32:
12093         { /* the same code as for TARGET_NR_getgroups */
12094             int gidsetsize = arg1;
12095             uint32_t *target_grouplist;
12096             g_autofree gid_t *grouplist = NULL;
12097             int i;
12098 
12099             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12100                 return -TARGET_EINVAL;
12101             }
12102             if (gidsetsize > 0) {
12103                 grouplist = g_try_new(gid_t, gidsetsize);
12104                 if (!grouplist) {
12105                     return -TARGET_ENOMEM;
12106                 }
12107             }
12108             ret = get_errno(getgroups(gidsetsize, grouplist));
12109             if (!is_error(ret) && gidsetsize > 0) {
12110                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12111                                              gidsetsize * 4, 0);
12112                 if (!target_grouplist) {
12113                     return -TARGET_EFAULT;
12114                 }
12115                 for (i = 0; i < ret; i++) {
12116                     target_grouplist[i] = tswap32(grouplist[i]);
12117                 }
12118                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12119             }
12120             return ret;
12121         }
12122 #endif
12123 #ifdef TARGET_NR_setgroups32
12124     case TARGET_NR_setgroups32:
12125         { /* the same code as for TARGET_NR_setgroups */
12126             int gidsetsize = arg1;
12127             uint32_t *target_grouplist;
12128             g_autofree gid_t *grouplist = NULL;
12129             int i;
12130 
12131             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12132                 return -TARGET_EINVAL;
12133             }
12134             if (gidsetsize > 0) {
12135                 grouplist = g_try_new(gid_t, gidsetsize);
12136                 if (!grouplist) {
12137                     return -TARGET_ENOMEM;
12138                 }
12139                 target_grouplist = lock_user(VERIFY_READ, arg2,
12140                                              gidsetsize * 4, 1);
12141                 if (!target_grouplist) {
12142                     return -TARGET_EFAULT;
12143                 }
12144                 for (i = 0; i < gidsetsize; i++) {
12145                     grouplist[i] = tswap32(target_grouplist[i]);
12146                 }
12147                 unlock_user(target_grouplist, arg2, 0);
12148             }
12149             return get_errno(setgroups(gidsetsize, grouplist));
12150         }
12151 #endif
12152 #ifdef TARGET_NR_fchown32
12153     case TARGET_NR_fchown32:
12154         return get_errno(fchown(arg1, arg2, arg3));
12155 #endif
12156 #ifdef TARGET_NR_setresuid32
12157     case TARGET_NR_setresuid32:
12158         return get_errno(sys_setresuid(arg1, arg2, arg3));
12159 #endif
12160 #ifdef TARGET_NR_getresuid32
12161     case TARGET_NR_getresuid32:
12162         {
12163             uid_t ruid, euid, suid;
12164             ret = get_errno(getresuid(&ruid, &euid, &suid));
12165             if (!is_error(ret)) {
12166                 if (put_user_u32(ruid, arg1)
12167                     || put_user_u32(euid, arg2)
12168                     || put_user_u32(suid, arg3))
12169                     return -TARGET_EFAULT;
12170             }
12171         }
12172         return ret;
12173 #endif
12174 #ifdef TARGET_NR_setresgid32
12175     case TARGET_NR_setresgid32:
12176         return get_errno(sys_setresgid(arg1, arg2, arg3));
12177 #endif
12178 #ifdef TARGET_NR_getresgid32
12179     case TARGET_NR_getresgid32:
12180         {
12181             gid_t rgid, egid, sgid;
12182             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12183             if (!is_error(ret)) {
12184                 if (put_user_u32(rgid, arg1)
12185                     || put_user_u32(egid, arg2)
12186                     || put_user_u32(sgid, arg3))
12187                     return -TARGET_EFAULT;
12188             }
12189         }
12190         return ret;
12191 #endif
12192 #ifdef TARGET_NR_chown32
12193     case TARGET_NR_chown32:
12194         if (!(p = lock_user_string(arg1))) {
12195             return -TARGET_EFAULT;
        }
12196         ret = get_errno(chown(p, arg2, arg3));
12197         unlock_user(p, arg1, 0);
12198         return ret;
12199 #endif
12200 #ifdef TARGET_NR_setuid32
12201     case TARGET_NR_setuid32:
12202         return get_errno(sys_setuid(arg1));
12203 #endif
12204 #ifdef TARGET_NR_setgid32
12205     case TARGET_NR_setgid32:
12206         return get_errno(sys_setgid(arg1));
12207 #endif
12208 #ifdef TARGET_NR_setfsuid32
12209     case TARGET_NR_setfsuid32:
12210         return get_errno(setfsuid(arg1));
12211 #endif
12212 #ifdef TARGET_NR_setfsgid32
12213     case TARGET_NR_setfsgid32:
12214         return get_errno(setfsgid(arg1));
12215 #endif
12216 #ifdef TARGET_NR_mincore
12217     case TARGET_NR_mincore:
12218         {
12219             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12220             if (!a) {
12221                 return -TARGET_ENOMEM;
12222             }
12223             p = lock_user_string(arg3);
12224             if (!p) {
12225                 ret = -TARGET_EFAULT;
12226             } else {
12227                 ret = get_errno(mincore(a, arg2, p));
12228                 unlock_user(p, arg3, ret);
12229             }
12230             unlock_user(a, arg1, 0);
12231         }
12232         return ret;
12233 #endif
12234 #ifdef TARGET_NR_arm_fadvise64_64
12235     case TARGET_NR_arm_fadvise64_64:
12236         /* arm_fadvise64_64 looks like fadvise64_64 but
12237          * with different argument order: fd, advice, offset, len
12238          * rather than the usual fd, offset, len, advice.
12239          * Note that offset and len are both 64-bit so appear as
12240          * pairs of 32-bit registers.
12241          */
12242         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12243                             target_offset64(arg5, arg6), arg2);
12244         return -host_to_target_errno(ret);
12245 #endif
12246 
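    /*
     * On 32-bit ABIs the 64-bit offset and length arguments arrive as pairs
     * of registers; target_offset64() reassembles them, and regpairs_aligned()
     * accounts for ABIs that require such pairs to start on an even register,
     * which shifts the remaining arguments along by one.
     */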
12247 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12248 
12249 #ifdef TARGET_NR_fadvise64_64
12250     case TARGET_NR_fadvise64_64:
12251 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12252         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12253         ret = arg2;
12254         arg2 = arg3;
12255         arg3 = arg4;
12256         arg4 = arg5;
12257         arg5 = arg6;
12258         arg6 = ret;
12259 #else
12260         /* 6 args: fd, offset (high, low), len (high, low), advice */
12261         if (regpairs_aligned(cpu_env, num)) {
12262             /* offset is in (3,4), len in (5,6) and advice in 7 */
12263             arg2 = arg3;
12264             arg3 = arg4;
12265             arg4 = arg5;
12266             arg5 = arg6;
12267             arg6 = arg7;
12268         }
12269 #endif
12270         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12271                             target_offset64(arg4, arg5), arg6);
12272         return -host_to_target_errno(ret);
12273 #endif
12274 
12275 #ifdef TARGET_NR_fadvise64
12276     case TARGET_NR_fadvise64:
12277         /* 5 args: fd, offset (high, low), len, advice */
12278         if (regpairs_aligned(cpu_env, num)) {
12279             /* offset is in (3,4), len in 5 and advice in 6 */
12280             arg2 = arg3;
12281             arg3 = arg4;
12282             arg4 = arg5;
12283             arg5 = arg6;
12284         }
12285         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12286         return -host_to_target_errno(ret);
12287 #endif
12288 
12289 #else /* not a 32-bit ABI */
12290 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12291 #ifdef TARGET_NR_fadvise64_64
12292     case TARGET_NR_fadvise64_64:
12293 #endif
12294 #ifdef TARGET_NR_fadvise64
12295     case TARGET_NR_fadvise64:
12296 #endif
12297 #ifdef TARGET_S390X
12298         switch (arg4) {
12299         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12300         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12301         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12302         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12303         default: break;
12304         }
12305 #endif
12306         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12307 #endif
12308 #endif /* end of 64-bit ABI fadvise handling */
12309 
12310 #ifdef TARGET_NR_madvise
12311     case TARGET_NR_madvise:
12312         return target_madvise(arg1, arg2, arg3);
12313 #endif
12314 #ifdef TARGET_NR_fcntl64
12315     case TARGET_NR_fcntl64:
12316     {
12317         int cmd;
12318         struct flock64 fl;
12319         from_flock64_fn *copyfrom = copy_from_user_flock64;
12320         to_flock64_fn *copyto = copy_to_user_flock64;
12321 
12322 #ifdef TARGET_ARM
12323         if (!cpu_env->eabi) {
12324             copyfrom = copy_from_user_oabi_flock64;
12325             copyto = copy_to_user_oabi_flock64;
12326         }
12327 #endif
12328 
12329         cmd = target_to_host_fcntl_cmd(arg2);
12330         if (cmd == -TARGET_EINVAL) {
12331             return cmd;
12332         }
12333 
12334         switch (arg2) {
12335         case TARGET_F_GETLK64:
12336             ret = copyfrom(&fl, arg3);
12337             if (ret) {
12338                 break;
12339             }
12340             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12341             if (ret == 0) {
12342                 ret = copyto(arg3, &fl);
12343             }
12344             break;
12345 
12346         case TARGET_F_SETLK64:
12347         case TARGET_F_SETLKW64:
12348             ret = copyfrom(&fl, arg3);
12349             if (ret) {
12350                 break;
12351             }
12352             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12353             break;
12354         default:
12355             ret = do_fcntl(arg1, arg2, arg3);
12356             break;
12357         }
12358         return ret;
12359     }
12360 #endif
12361 #ifdef TARGET_NR_cacheflush
12362     case TARGET_NR_cacheflush:
12363         /* self-modifying code is handled automatically, so nothing needed */
12364         return 0;
12365 #endif
12366 #ifdef TARGET_NR_getpagesize
12367     case TARGET_NR_getpagesize:
12368         return TARGET_PAGE_SIZE;
12369 #endif
12370     case TARGET_NR_gettid:
12371         return get_errno(sys_gettid());
12372 #ifdef TARGET_NR_readahead
12373     case TARGET_NR_readahead:
12374 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12375         if (regpairs_aligned(cpu_env, num)) {
12376             arg2 = arg3;
12377             arg3 = arg4;
12378             arg4 = arg5;
12379         }
12380         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12381 #else
12382         ret = get_errno(readahead(arg1, arg2, arg3));
12383 #endif
12384         return ret;
12385 #endif
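    /*
     * The extended attribute syscalls share one pattern: lock the guest
     * name/value buffers, forward to the matching host xattr call, then
     * unlock.  A zero buffer pointer is passed through as NULL so guests can
     * query the required size in the usual way.
     */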
12386 #ifdef CONFIG_ATTR
12387 #ifdef TARGET_NR_setxattr
12388     case TARGET_NR_listxattr:
12389     case TARGET_NR_llistxattr:
12390     {
12391         void *p, *b = 0;
12392         if (arg2) {
12393             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12394             if (!b) {
12395                 return -TARGET_EFAULT;
12396             }
12397         }
12398         p = lock_user_string(arg1);
12399         if (p) {
12400             if (num == TARGET_NR_listxattr) {
12401                 ret = get_errno(listxattr(p, b, arg3));
12402             } else {
12403                 ret = get_errno(llistxattr(p, b, arg3));
12404             }
12405         } else {
12406             ret = -TARGET_EFAULT;
12407         }
12408         unlock_user(p, arg1, 0);
12409         unlock_user(b, arg2, arg3);
12410         return ret;
12411     }
12412     case TARGET_NR_flistxattr:
12413     {
12414         void *b = 0;
12415         if (arg2) {
12416             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12417             if (!b) {
12418                 return -TARGET_EFAULT;
12419             }
12420         }
12421         ret = get_errno(flistxattr(arg1, b, arg3));
12422         unlock_user(b, arg2, arg3);
12423         return ret;
12424     }
12425     case TARGET_NR_setxattr:
12426     case TARGET_NR_lsetxattr:
12427         {
12428             void *p, *n, *v = 0;
12429             if (arg3) {
12430                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12431                 if (!v) {
12432                     return -TARGET_EFAULT;
12433                 }
12434             }
12435             p = lock_user_string(arg1);
12436             n = lock_user_string(arg2);
12437             if (p && n) {
12438                 if (num == TARGET_NR_setxattr) {
12439                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12440                 } else {
12441                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12442                 }
12443             } else {
12444                 ret = -TARGET_EFAULT;
12445             }
12446             unlock_user(p, arg1, 0);
12447             unlock_user(n, arg2, 0);
12448             unlock_user(v, arg3, 0);
12449         }
12450         return ret;
12451     case TARGET_NR_fsetxattr:
12452         {
12453             void *n, *v = 0;
12454             if (arg3) {
12455                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12456                 if (!v) {
12457                     return -TARGET_EFAULT;
12458                 }
12459             }
12460             n = lock_user_string(arg2);
12461             if (n) {
12462                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12463             } else {
12464                 ret = -TARGET_EFAULT;
12465             }
12466             unlock_user(n, arg2, 0);
12467             unlock_user(v, arg3, 0);
12468         }
12469         return ret;
12470     case TARGET_NR_getxattr:
12471     case TARGET_NR_lgetxattr:
12472         {
12473             void *p, *n, *v = 0;
12474             if (arg3) {
12475                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12476                 if (!v) {
12477                     return -TARGET_EFAULT;
12478                 }
12479             }
12480             p = lock_user_string(arg1);
12481             n = lock_user_string(arg2);
12482             if (p && n) {
12483                 if (num == TARGET_NR_getxattr) {
12484                     ret = get_errno(getxattr(p, n, v, arg4));
12485                 } else {
12486                     ret = get_errno(lgetxattr(p, n, v, arg4));
12487                 }
12488             } else {
12489                 ret = -TARGET_EFAULT;
12490             }
12491             unlock_user(p, arg1, 0);
12492             unlock_user(n, arg2, 0);
12493             unlock_user(v, arg3, arg4);
12494         }
12495         return ret;
12496     case TARGET_NR_fgetxattr:
12497         {
12498             void *n, *v = 0;
12499             if (arg3) {
12500                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12501                 if (!v) {
12502                     return -TARGET_EFAULT;
12503                 }
12504             }
12505             n = lock_user_string(arg2);
12506             if (n) {
12507                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12508             } else {
12509                 ret = -TARGET_EFAULT;
12510             }
12511             unlock_user(n, arg2, 0);
12512             unlock_user(v, arg3, arg4);
12513         }
12514         return ret;
12515     case TARGET_NR_removexattr:
12516     case TARGET_NR_lremovexattr:
12517         {
12518             void *p, *n;
12519             p = lock_user_string(arg1);
12520             n = lock_user_string(arg2);
12521             if (p && n) {
12522                 if (num == TARGET_NR_removexattr) {
12523                     ret = get_errno(removexattr(p, n));
12524                 } else {
12525                     ret = get_errno(lremovexattr(p, n));
12526                 }
12527             } else {
12528                 ret = -TARGET_EFAULT;
12529             }
12530             unlock_user(p, arg1, 0);
12531             unlock_user(n, arg2, 0);
12532         }
12533         return ret;
12534     case TARGET_NR_fremovexattr:
12535         {
12536             void *n;
12537             n = lock_user_string(arg2);
12538             if (n) {
12539                 ret = get_errno(fremovexattr(arg1, n));
12540             } else {
12541                 ret = -TARGET_EFAULT;
12542             }
12543             unlock_user(n, arg2, 0);
12544         }
12545         return ret;
12546 #endif
12547 #endif /* CONFIG_ATTR */
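    /*
     * set_thread_area/get_thread_area are emulated entirely inside QEMU: the
     * TLS pointer is stored in the per-target CPU state (or in the TaskState
     * for m68k) rather than being passed to the host kernel.
     */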
12548 #ifdef TARGET_NR_set_thread_area
12549     case TARGET_NR_set_thread_area:
12550 #if defined(TARGET_MIPS)
12551       cpu_env->active_tc.CP0_UserLocal = arg1;
12552       return 0;
12553 #elif defined(TARGET_CRIS)
12554       if (arg1 & 0xff) {
12555           ret = -TARGET_EINVAL;
12556       } else {
12557           cpu_env->pregs[PR_PID] = arg1;
12558           ret = 0;
12559       }
12560       return ret;
12561 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12562       return do_set_thread_area(cpu_env, arg1);
12563 #elif defined(TARGET_M68K)
12564       {
12565           TaskState *ts = cpu->opaque;
12566           ts->tp_value = arg1;
12567           return 0;
12568       }
12569 #else
12570       return -TARGET_ENOSYS;
12571 #endif
12572 #endif
12573 #ifdef TARGET_NR_get_thread_area
12574     case TARGET_NR_get_thread_area:
12575 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12576         return do_get_thread_area(cpu_env, arg1);
12577 #elif defined(TARGET_M68K)
12578         {
12579             TaskState *ts = cpu->opaque;
12580             return ts->tp_value;
12581         }
12582 #else
12583         return -TARGET_ENOSYS;
12584 #endif
12585 #endif
12586 #ifdef TARGET_NR_getdomainname
12587     case TARGET_NR_getdomainname:
12588         return -TARGET_ENOSYS;
12589 #endif
12590 
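    /*
     * The clock_* syscalls only need the timespec converted between target
     * and host layouts; the *64/*_time64 variants use the 64-bit time layout
     * so that 32-bit guests get a y2038-safe interface.
     */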
12591 #ifdef TARGET_NR_clock_settime
12592     case TARGET_NR_clock_settime:
12593     {
12594         struct timespec ts;
12595 
12596         ret = target_to_host_timespec(&ts, arg2);
12597         if (!is_error(ret)) {
12598             ret = get_errno(clock_settime(arg1, &ts));
12599         }
12600         return ret;
12601     }
12602 #endif
12603 #ifdef TARGET_NR_clock_settime64
12604     case TARGET_NR_clock_settime64:
12605     {
12606         struct timespec ts;
12607 
12608         ret = target_to_host_timespec64(&ts, arg2);
12609         if (!is_error(ret)) {
12610             ret = get_errno(clock_settime(arg1, &ts));
12611         }
12612         return ret;
12613     }
12614 #endif
12615 #ifdef TARGET_NR_clock_gettime
12616     case TARGET_NR_clock_gettime:
12617     {
12618         struct timespec ts;
12619         ret = get_errno(clock_gettime(arg1, &ts));
12620         if (!is_error(ret)) {
12621             ret = host_to_target_timespec(arg2, &ts);
12622         }
12623         return ret;
12624     }
12625 #endif
12626 #ifdef TARGET_NR_clock_gettime64
12627     case TARGET_NR_clock_gettime64:
12628     {
12629         struct timespec ts;
12630         ret = get_errno(clock_gettime(arg1, &ts));
12631         if (!is_error(ret)) {
12632             ret = host_to_target_timespec64(arg2, &ts);
12633         }
12634         return ret;
12635     }
12636 #endif
12637 #ifdef TARGET_NR_clock_getres
12638     case TARGET_NR_clock_getres:
12639     {
12640         struct timespec ts;
12641         ret = get_errno(clock_getres(arg1, &ts));
12642         if (!is_error(ret)) {
12643             host_to_target_timespec(arg2, &ts);
12644         }
12645         return ret;
12646     }
12647 #endif
12648 #ifdef TARGET_NR_clock_getres_time64
12649     case TARGET_NR_clock_getres_time64:
12650     {
12651         struct timespec ts;
12652         ret = get_errno(clock_getres(arg1, &ts));
12653         if (!is_error(ret)) {
12654             host_to_target_timespec64(arg2, &ts);
12655         }
12656         return ret;
12657     }
12658 #endif
12659 #ifdef TARGET_NR_clock_nanosleep
12660     case TARGET_NR_clock_nanosleep:
12661     {
12662         struct timespec ts;
12663         if (target_to_host_timespec(&ts, arg3)) {
12664             return -TARGET_EFAULT;
12665         }
12666         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12667                                              &ts, arg4 ? &ts : NULL));
12668         /*
12669          * If the call is interrupted by a signal handler, it fails with
12670          * -TARGET_EINTR; if arg4 is non-NULL and arg2 is not TIMER_ABSTIME,
12671          * the remaining unslept time is reported back to the guest in arg4.
12672          */
12673         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12674             host_to_target_timespec(arg4, &ts)) {
12675             return -TARGET_EFAULT;
12676         }
12677 
12678         return ret;
12679     }
12680 #endif
12681 #ifdef TARGET_NR_clock_nanosleep_time64
12682     case TARGET_NR_clock_nanosleep_time64:
12683     {
12684         struct timespec ts;
12685 
12686         if (target_to_host_timespec64(&ts, arg3)) {
12687             return -TARGET_EFAULT;
12688         }
12689 
12690         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12691                                              &ts, arg4 ? &ts : NULL));
12692 
12693         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12694             host_to_target_timespec64(arg4, &ts)) {
12695             return -TARGET_EFAULT;
12696         }
12697         return ret;
12698     }
12699 #endif
12700 
12701 #if defined(TARGET_NR_set_tid_address)
12702     case TARGET_NR_set_tid_address:
12703     {
12704         TaskState *ts = cpu->opaque;
12705         ts->child_tidptr = arg1;
12706             /* Do not call the host set_tid_address() syscall; just return the tid. */
12707         return get_errno(sys_gettid());
12708     }
12709 #endif
12710 
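    /* tkill/tgkill only need the signal number translated to the host's. */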
12711     case TARGET_NR_tkill:
12712         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12713 
12714     case TARGET_NR_tgkill:
12715         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12716                          target_to_host_signal(arg3)));
12717 
12718 #ifdef TARGET_NR_set_robust_list
12719     case TARGET_NR_set_robust_list:
12720     case TARGET_NR_get_robust_list:
12721         /* The ABI for supporting robust futexes has userspace pass
12722          * the kernel a pointer to a linked list which is updated by
12723          * userspace after the syscall; the list is walked by the kernel
12724          * when the thread exits. Since the linked list in QEMU guest
12725          * memory isn't a valid linked list for the host and we have
12726          * no way to reliably intercept the thread-death event, we can't
12727          * support these. Silently return ENOSYS so that guest userspace
12728          * falls back to a non-robust futex implementation (which should
12729          * be OK except in the corner case of the guest crashing while
12730          * holding a mutex that is shared with another process via
12731          * shared memory).
12732          */
12733         return -TARGET_ENOSYS;
12734 #endif
12735 
12736 #if defined(TARGET_NR_utimensat)
12737     case TARGET_NR_utimensat:
12738         {
12739             struct timespec *tsp, ts[2];
12740             if (!arg3) {
12741                 tsp = NULL;
12742             } else {
12743                 if (target_to_host_timespec(ts, arg3)) {
12744                     return -TARGET_EFAULT;
12745                 }
12746                 if (target_to_host_timespec(ts + 1, arg3 +
12747                                             sizeof(struct target_timespec))) {
12748                     return -TARGET_EFAULT;
12749                 }
12750                 tsp = ts;
12751             }
12752             if (!arg2) {
12753                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12754             } else {
12755                 if (!(p = lock_user_string(arg2))) {
12756                     return -TARGET_EFAULT;
12757                 }
12758                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12759                 unlock_user(p, arg2, 0);
12760             }
12761         }
12762         return ret;
12763 #endif
12764 #ifdef TARGET_NR_utimensat_time64
12765     case TARGET_NR_utimensat_time64:
12766         {
12767             struct timespec *tsp, ts[2];
12768             if (!arg3) {
12769                 tsp = NULL;
12770             } else {
12771                 if (target_to_host_timespec64(ts, arg3)) {
12772                     return -TARGET_EFAULT;
12773                 }
12774                 if (target_to_host_timespec64(ts + 1, arg3 +
12775                                      sizeof(struct target__kernel_timespec))) {
12776                     return -TARGET_EFAULT;
12777                 }
12778                 tsp = ts;
12779             }
12780             if (!arg2) {
12781                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12782             } else {
12783                 p = lock_user_string(arg2);
12784                 if (!p) {
12785                     return -TARGET_EFAULT;
12786                 }
12787                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12788                 unlock_user(p, arg2, 0);
12789             }
12790         }
12791         return ret;
12792 #endif
12793 #ifdef TARGET_NR_futex
12794     case TARGET_NR_futex:
12795         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12796 #endif
12797 #ifdef TARGET_NR_futex_time64
12798     case TARGET_NR_futex_time64:
12799         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12800 #endif
12801 #ifdef CONFIG_INOTIFY
12802 #if defined(TARGET_NR_inotify_init)
12803     case TARGET_NR_inotify_init:
12804         ret = get_errno(inotify_init());
12805         if (ret >= 0) {
12806             fd_trans_register(ret, &target_inotify_trans);
12807         }
12808         return ret;
12809 #endif
12810 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12811     case TARGET_NR_inotify_init1:
12812         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12813                                           fcntl_flags_tbl)));
12814         if (ret >= 0) {
12815             fd_trans_register(ret, &target_inotify_trans);
12816         }
12817         return ret;
12818 #endif
12819 #if defined(TARGET_NR_inotify_add_watch)
12820     case TARGET_NR_inotify_add_watch:
12821         p = lock_user_string(arg2);
12822         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12823         unlock_user(p, arg2, 0);
12824         return ret;
12825 #endif
12826 #if defined(TARGET_NR_inotify_rm_watch)
12827     case TARGET_NR_inotify_rm_watch:
12828         return get_errno(inotify_rm_watch(arg1, arg2));
12829 #endif
12830 #endif
12831 
12832 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12833     case TARGET_NR_mq_open:
12834         {
12835             struct mq_attr posix_mq_attr;
12836             struct mq_attr *pposix_mq_attr;
12837             int host_flags;
12838 
12839             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12840             pposix_mq_attr = NULL;
12841             if (arg4) {
12842                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12843                     return -TARGET_EFAULT;
12844                 }
12845                 pposix_mq_attr = &posix_mq_attr;
12846             }
12847             p = lock_user_string(arg1 - 1);
12848             if (!p) {
12849                 return -TARGET_EFAULT;
12850             }
12851             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12852             unlock_user(p, arg1, 0);
12853         }
12854         return ret;
12855 
12856     case TARGET_NR_mq_unlink:
12857         p = lock_user_string(arg1 - 1);
12858         if (!p) {
12859             return -TARGET_EFAULT;
12860         }
12861         ret = get_errno(mq_unlink(p));
12862         unlock_user(p, arg1, 0);
12863         return ret;
12864 
12865 #ifdef TARGET_NR_mq_timedsend
12866     case TARGET_NR_mq_timedsend:
12867         {
12868             struct timespec ts;
12869 
12870             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12871             if (arg5 != 0) {
12872                 if (target_to_host_timespec(&ts, arg5)) {
12873                     return -TARGET_EFAULT;
12874                 }
12875                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12876                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12877                     return -TARGET_EFAULT;
12878                 }
12879             } else {
12880                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12881             }
12882             unlock_user(p, arg2, arg3);
12883         }
12884         return ret;
12885 #endif
12886 #ifdef TARGET_NR_mq_timedsend_time64
12887     case TARGET_NR_mq_timedsend_time64:
12888         {
12889             struct timespec ts;
12890 
12891             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12892             if (arg5 != 0) {
12893                 if (target_to_host_timespec64(&ts, arg5)) {
12894                     return -TARGET_EFAULT;
12895                 }
12896                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12897                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12898                     return -TARGET_EFAULT;
12899                 }
12900             } else {
12901                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12902             }
12903             unlock_user(p, arg2, arg3);
12904         }
12905         return ret;
12906 #endif
12907 
12908 #ifdef TARGET_NR_mq_timedreceive
12909     case TARGET_NR_mq_timedreceive:
12910         {
12911             struct timespec ts;
12912             unsigned int prio;
12913 
12914             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12915             if (arg5 != 0) {
12916                 if (target_to_host_timespec(&ts, arg5)) {
12917                     return -TARGET_EFAULT;
12918                 }
12919                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12920                                                      &prio, &ts));
12921                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12922                     return -TARGET_EFAULT;
12923                 }
12924             } else {
12925                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12926                                                      &prio, NULL));
12927             }
12928             unlock_user(p, arg2, arg3);
12929             if (arg4 != 0) {
12930                 put_user_u32(prio, arg4);
            }
12931         }
12932         return ret;
12933 #endif
12934 #ifdef TARGET_NR_mq_timedreceive_time64
12935     case TARGET_NR_mq_timedreceive_time64:
12936         {
12937             struct timespec ts;
12938             unsigned int prio;
12939 
12940             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12941             if (arg5 != 0) {
12942                 if (target_to_host_timespec64(&ts, arg5)) {
12943                     return -TARGET_EFAULT;
12944                 }
12945                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12946                                                      &prio, &ts));
12947                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12948                     return -TARGET_EFAULT;
12949                 }
12950             } else {
12951                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12952                                                      &prio, NULL));
12953             }
12954             unlock_user(p, arg2, arg3);
12955             if (arg4 != 0) {
12956                 put_user_u32(prio, arg4);
12957             }
12958         }
12959         return ret;
12960 #endif
12961 
12962     /* Not implemented for now... */
12963 /*     case TARGET_NR_mq_notify: */
12964 /*         break; */
12965 
12966     case TARGET_NR_mq_getsetattr:
12967         {
12968             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12969             ret = 0;
12970             if (arg2 != 0) {
12971                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12972                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12973                                            &posix_mq_attr_out));
12974             } else if (arg3 != 0) {
12975                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12976             }
12977             if (ret == 0 && arg3 != 0) {
12978                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12979             }
12980         }
12981         return ret;
12982 #endif
12983 
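    /*
     * tee/splice/vmsplice map directly onto the host syscalls; splice
     * additionally copies the optional 64-bit offsets in from guest memory
     * and writes the updated values back afterwards.
     */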
12984 #ifdef CONFIG_SPLICE
12985 #ifdef TARGET_NR_tee
12986     case TARGET_NR_tee:
12987         {
12988             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12989         }
12990         return ret;
12991 #endif
12992 #ifdef TARGET_NR_splice
12993     case TARGET_NR_splice:
12994         {
12995             loff_t loff_in, loff_out;
12996             loff_t *ploff_in = NULL, *ploff_out = NULL;
12997             if (arg2) {
12998                 if (get_user_u64(loff_in, arg2)) {
12999                     return -TARGET_EFAULT;
13000                 }
13001                 ploff_in = &loff_in;
13002             }
13003             if (arg4) {
13004                 if (get_user_u64(loff_out, arg4)) {
13005                     return -TARGET_EFAULT;
13006                 }
13007                 ploff_out = &loff_out;
13008             }
13009             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13010             if (arg2) {
13011                 if (put_user_u64(loff_in, arg2)) {
13012                     return -TARGET_EFAULT;
13013                 }
13014             }
13015             if (arg4) {
13016                 if (put_user_u64(loff_out, arg4)) {
13017                     return -TARGET_EFAULT;
13018                 }
13019             }
13020         }
13021         return ret;
13022 #endif
13023 #ifdef TARGET_NR_vmsplice
13024     case TARGET_NR_vmsplice:
13025         {
13026             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13027             if (vec != NULL) {
13028                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13029                 unlock_iovec(vec, arg2, arg3, 0);
13030             } else {
13031                 ret = -host_to_target_errno(errno);
13032             }
13033         }
13034         return ret;
13035 #endif
13036 #endif /* CONFIG_SPLICE */
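    /*
     * eventfd file descriptors are registered with the fd_trans layer so that
     * the 8-byte counter values transferred by read/write can be converted to
     * the guest's byte order.
     */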
13037 #ifdef CONFIG_EVENTFD
13038 #if defined(TARGET_NR_eventfd)
13039     case TARGET_NR_eventfd:
13040         ret = get_errno(eventfd(arg1, 0));
13041         if (ret >= 0) {
13042             fd_trans_register(ret, &target_eventfd_trans);
13043         }
13044         return ret;
13045 #endif
13046 #if defined(TARGET_NR_eventfd2)
13047     case TARGET_NR_eventfd2:
13048     {
13049         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13050         if (arg2 & TARGET_O_NONBLOCK) {
13051             host_flags |= O_NONBLOCK;
13052         }
13053         if (arg2 & TARGET_O_CLOEXEC) {
13054             host_flags |= O_CLOEXEC;
13055         }
13056         ret = get_errno(eventfd(arg1, host_flags));
13057         if (ret >= 0) {
13058             fd_trans_register(ret, &target_eventfd_trans);
13059         }
13060         return ret;
13061     }
13062 #endif
13063 #endif /* CONFIG_EVENTFD  */
13064 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13065     case TARGET_NR_fallocate:
13066 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13067         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13068                                   target_offset64(arg5, arg6)));
13069 #else
13070         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13071 #endif
13072         return ret;
13073 #endif
13074 #if defined(CONFIG_SYNC_FILE_RANGE)
13075 #if defined(TARGET_NR_sync_file_range)
13076     case TARGET_NR_sync_file_range:
13077 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13078 #if defined(TARGET_MIPS)
13079         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13080                                         target_offset64(arg5, arg6), arg7));
13081 #else
13082         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13083                                         target_offset64(arg4, arg5), arg6));
13084 #endif /* !TARGET_MIPS */
13085 #else
13086         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13087 #endif
13088         return ret;
13089 #endif
13090 #if defined(TARGET_NR_sync_file_range2) || \
13091     defined(TARGET_NR_arm_sync_file_range)
13092 #if defined(TARGET_NR_sync_file_range2)
13093     case TARGET_NR_sync_file_range2:
13094 #endif
13095 #if defined(TARGET_NR_arm_sync_file_range)
13096     case TARGET_NR_arm_sync_file_range:
13097 #endif
13098         /* This is like sync_file_range but the arguments are reordered */
13099 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13100         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13101                                         target_offset64(arg5, arg6), arg2));
13102 #else
13103         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13104 #endif
13105         return ret;
13106 #endif
13107 #endif
13108 #if defined(TARGET_NR_signalfd4)
13109     case TARGET_NR_signalfd4:
13110         return do_signalfd4(arg1, arg2, arg4);
13111 #endif
13112 #if defined(TARGET_NR_signalfd)
13113     case TARGET_NR_signalfd:
13114         return do_signalfd4(arg1, arg2, 0);
13115 #endif
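    /*
     * epoll_wait and epoll_pwait share one implementation below: events are
     * collected into a host-side array and then converted into the guest's
     * target_epoll_event layout; epoll_pwait additionally installs the
     * guest-supplied signal mask around the wait.
     */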
13116 #if defined(CONFIG_EPOLL)
13117 #if defined(TARGET_NR_epoll_create)
13118     case TARGET_NR_epoll_create:
13119         return get_errno(epoll_create(arg1));
13120 #endif
13121 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13122     case TARGET_NR_epoll_create1:
13123         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13124 #endif
13125 #if defined(TARGET_NR_epoll_ctl)
13126     case TARGET_NR_epoll_ctl:
13127     {
13128         struct epoll_event ep;
13129         struct epoll_event *epp = 0;
13130         if (arg4) {
13131             if (arg2 != EPOLL_CTL_DEL) {
13132                 struct target_epoll_event *target_ep;
13133                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13134                     return -TARGET_EFAULT;
13135                 }
13136                 ep.events = tswap32(target_ep->events);
13137                 /*
13138                  * The epoll_data_t union is just opaque data to the kernel,
13139                  * so we transfer all 64 bits across and need not worry what
13140                  * actual data type it is.
13141                  */
13142                 ep.data.u64 = tswap64(target_ep->data.u64);
13143                 unlock_user_struct(target_ep, arg4, 0);
13144             }
13145             /*
13146              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13147              * non-null pointer, even though this argument is ignored.
13149              */
13150             epp = &ep;
13151         }
13152         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13153     }
13154 #endif
13155 
13156 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13157 #if defined(TARGET_NR_epoll_wait)
13158     case TARGET_NR_epoll_wait:
13159 #endif
13160 #if defined(TARGET_NR_epoll_pwait)
13161     case TARGET_NR_epoll_pwait:
13162 #endif
13163     {
13164         struct target_epoll_event *target_ep;
13165         struct epoll_event *ep;
13166         int epfd = arg1;
13167         int maxevents = arg3;
13168         int timeout = arg4;
13169 
13170         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13171             return -TARGET_EINVAL;
13172         }
13173 
13174         target_ep = lock_user(VERIFY_WRITE, arg2,
13175                               maxevents * sizeof(struct target_epoll_event), 1);
13176         if (!target_ep) {
13177             return -TARGET_EFAULT;
13178         }
13179 
13180         ep = g_try_new(struct epoll_event, maxevents);
13181         if (!ep) {
13182             unlock_user(target_ep, arg2, 0);
13183             return -TARGET_ENOMEM;
13184         }
13185 
13186         switch (num) {
13187 #if defined(TARGET_NR_epoll_pwait)
13188         case TARGET_NR_epoll_pwait:
13189         {
13190             sigset_t *set = NULL;
13191 
13192             if (arg5) {
13193                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13194                 if (ret != 0) {
13195                     break;
13196                 }
13197             }
13198 
13199             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13200                                              set, SIGSET_T_SIZE));
13201 
13202             if (set) {
13203                 finish_sigsuspend_mask(ret);
13204             }
13205             break;
13206         }
13207 #endif
13208 #if defined(TARGET_NR_epoll_wait)
13209         case TARGET_NR_epoll_wait:
13210             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13211                                              NULL, 0));
13212             break;
13213 #endif
13214         default:
13215             ret = -TARGET_ENOSYS;
13216         }
13217         if (!is_error(ret)) {
13218             int i;
13219             for (i = 0; i < ret; i++) {
13220                 target_ep[i].events = tswap32(ep[i].events);
13221                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13222             }
13223             unlock_user(target_ep, arg2,
13224                         ret * sizeof(struct target_epoll_event));
13225         } else {
13226             unlock_user(target_ep, arg2, 0);
13227         }
13228         g_free(ep);
13229         return ret;
13230     }
13231 #endif
13232 #endif
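    /*
     * Note: the new limit is deliberately not applied for RLIMIT_AS,
     * RLIMIT_DATA and RLIMIT_STACK (only the old value is read back),
     * presumably so the guest cannot constrain QEMU's own host-side memory
     * usage.
     */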
13233 #ifdef TARGET_NR_prlimit64
13234     case TARGET_NR_prlimit64:
13235     {
13236         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13237         struct target_rlimit64 *target_rnew, *target_rold;
13238         struct host_rlimit64 rnew, rold, *rnewp = 0;
13239         int resource = target_to_host_resource(arg2);
13240 
13241         if (arg3 && (resource != RLIMIT_AS &&
13242                      resource != RLIMIT_DATA &&
13243                      resource != RLIMIT_STACK)) {
13244             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13245                 return -TARGET_EFAULT;
13246             }
13247             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13248             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13249             unlock_user_struct(target_rnew, arg3, 0);
13250             rnewp = &rnew;
13251         }
13252 
13253         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13254         if (!is_error(ret) && arg4) {
13255             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13256                 return -TARGET_EFAULT;
13257             }
13258             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13259             __put_user(rold.rlim_max, &target_rold->rlim_max);
13260             unlock_user_struct(target_rold, arg4, 1);
13261         }
13262         return ret;
13263     }
13264 #endif
13265 #ifdef TARGET_NR_gethostname
13266     case TARGET_NR_gethostname:
13267     {
13268         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13269         if (name) {
13270             ret = get_errno(gethostname(name, arg2));
13271             unlock_user(name, arg1, arg2);
13272         } else {
13273             ret = -TARGET_EFAULT;
13274         }
13275         return ret;
13276     }
13277 #endif
13278 #ifdef TARGET_NR_atomic_cmpxchg_32
13279     case TARGET_NR_atomic_cmpxchg_32:
13280     {
13281         /* should use start_exclusive from main.c */
13282         abi_ulong mem_value;
13283         if (get_user_u32(mem_value, arg6)) {
13284             target_siginfo_t info;
13285             info.si_signo = SIGSEGV;
13286             info.si_errno = 0;
13287             info.si_code = TARGET_SEGV_MAPERR;
13288             info._sifields._sigfault._addr = arg6;
13289             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13290             ret = 0xdeadbeef;
13292         }
13293         if (mem_value == arg2) {
13294             put_user_u32(arg1, arg6);
        }
13295         return mem_value;
13296     }
13297 #endif
13298 #ifdef TARGET_NR_atomic_barrier
13299     case TARGET_NR_atomic_barrier:
13300         /* Like the kernel implementation and the
13301            qemu arm barrier, no-op this? */
13302         return 0;
13303 #endif
13304 
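    /*
     * POSIX timers are tracked in the g_posix_timers slot array; the id
     * handed back to the guest is TIMER_MAGIC ORed with the slot index, and
     * get_timer_id() recovers the slot for the other timer_* syscalls.
     */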
13305 #ifdef TARGET_NR_timer_create
13306     case TARGET_NR_timer_create:
13307     {
13308         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13309 
13310         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13311 
13312         int clkid = arg1;
13313         int timer_index = next_free_host_timer();
13314 
13315         if (timer_index < 0) {
13316             ret = -TARGET_EAGAIN;
13317         } else {
13318             timer_t *phtimer = g_posix_timers + timer_index;
13319 
13320             if (arg2) {
13321                 phost_sevp = &host_sevp;
13322                 ret = target_to_host_sigevent(phost_sevp, arg2);
13323                 if (ret != 0) {
13324                     free_host_timer_slot(timer_index);
13325                     return ret;
13326                 }
13327             }
13328 
13329             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13330             if (ret) {
13331                 free_host_timer_slot(timer_index);
13332             } else {
13333                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13334                     timer_delete(*phtimer);
13335                     free_host_timer_slot(timer_index);
13336                     return -TARGET_EFAULT;
13337                 }
13338             }
13339         }
13340         return ret;
13341     }
13342 #endif
13343 
13344 #ifdef TARGET_NR_timer_settime
13345     case TARGET_NR_timer_settime:
13346     {
13347         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13348          * struct itimerspec * old_value */
13349         target_timer_t timerid = get_timer_id(arg1);
13350 
13351         if (timerid < 0) {
13352             ret = timerid;
13353         } else if (arg3 == 0) {
13354             ret = -TARGET_EINVAL;
13355         } else {
13356             timer_t htimer = g_posix_timers[timerid];
13357             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13358 
13359             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13360                 return -TARGET_EFAULT;
13361             }
13362             ret = get_errno(
13363                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13364             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13365                 return -TARGET_EFAULT;
13366             }
13367         }
13368         return ret;
13369     }
13370 #endif
13371 
13372 #ifdef TARGET_NR_timer_settime64
13373     case TARGET_NR_timer_settime64:
13374     {
13375         target_timer_t timerid = get_timer_id(arg1);
13376 
13377         if (timerid < 0) {
13378             ret = timerid;
13379         } else if (arg3 == 0) {
13380             ret = -TARGET_EINVAL;
13381         } else {
13382             timer_t htimer = g_posix_timers[timerid];
13383             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13384 
13385             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13386                 return -TARGET_EFAULT;
13387             }
13388             ret = get_errno(
13389                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13390             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13391                 return -TARGET_EFAULT;
13392             }
13393         }
13394         return ret;
13395     }
13396 #endif
13397 
13398 #ifdef TARGET_NR_timer_gettime
13399     case TARGET_NR_timer_gettime:
13400     {
13401         /* args: timer_t timerid, struct itimerspec *curr_value */
13402         target_timer_t timerid = get_timer_id(arg1);
13403 
13404         if (timerid < 0) {
13405             ret = timerid;
13406         } else if (!arg2) {
13407             ret = -TARGET_EFAULT;
13408         } else {
13409             timer_t htimer = g_posix_timers[timerid];
13410             struct itimerspec hspec;
13411             ret = get_errno(timer_gettime(htimer, &hspec));
13412 
13413             if (host_to_target_itimerspec(arg2, &hspec)) {
13414                 ret = -TARGET_EFAULT;
13415             }
13416         }
13417         return ret;
13418     }
13419 #endif
13420 
13421 #ifdef TARGET_NR_timer_gettime64
13422     case TARGET_NR_timer_gettime64:
13423     {
13424         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13425         target_timer_t timerid = get_timer_id(arg1);
13426 
13427         if (timerid < 0) {
13428             ret = timerid;
13429         } else if (!arg2) {
13430             ret = -TARGET_EFAULT;
13431         } else {
13432             timer_t htimer = g_posix_timers[timerid];
13433             struct itimerspec hspec;
13434             ret = get_errno(timer_gettime(htimer, &hspec));
13435 
13436             if (host_to_target_itimerspec64(arg2, &hspec)) {
13437                 ret = -TARGET_EFAULT;
13438             }
13439         }
13440         return ret;
13441     }
13442 #endif
13443 
13444 #ifdef TARGET_NR_timer_getoverrun
13445     case TARGET_NR_timer_getoverrun:
13446     {
13447         /* args: timer_t timerid */
13448         target_timer_t timerid = get_timer_id(arg1);
13449 
13450         if (timerid < 0) {
13451             ret = timerid;
13452         } else {
13453             timer_t htimer = g_posix_timers[timerid];
13454             ret = get_errno(timer_getoverrun(htimer));
13455         }
13456         return ret;
13457     }
13458 #endif
13459 
13460 #ifdef TARGET_NR_timer_delete
13461     case TARGET_NR_timer_delete:
13462     {
13463         /* args: timer_t timerid */
13464         target_timer_t timerid = get_timer_id(arg1);
13465 
13466         if (timerid < 0) {
13467             ret = timerid;
13468         } else {
13469             timer_t htimer = g_posix_timers[timerid];
13470             ret = get_errno(timer_delete(htimer));
13471             free_host_timer_slot(timerid);
13472         }
13473         return ret;
13474     }
13475 #endif
13476 
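    /*
     * timerfd descriptors are registered with fd_trans so that the 8-byte
     * expiration counts read from them can be converted to guest byte order;
     * the itimerspec arguments are converted explicitly below.
     */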
13477 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13478     case TARGET_NR_timerfd_create:
13479         ret = get_errno(timerfd_create(arg1,
13480                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13481         if (ret >= 0) {
13482             fd_trans_register(ret, &target_timerfd_trans);
13483         }
13484         return ret;
13485 #endif
13486 
13487 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13488     case TARGET_NR_timerfd_gettime:
13489         {
13490             struct itimerspec its_curr;
13491 
13492             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13493 
13494             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13495                 return -TARGET_EFAULT;
13496             }
13497         }
13498         return ret;
13499 #endif
13500 
13501 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13502     case TARGET_NR_timerfd_gettime64:
13503         {
13504             struct itimerspec its_curr;
13505 
13506             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13507 
13508             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13509                 return -TARGET_EFAULT;
13510             }
13511         }
13512         return ret;
13513 #endif
13514 
13515 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13516     case TARGET_NR_timerfd_settime:
13517         {
13518             struct itimerspec its_new, its_old, *p_new;
13519 
13520             if (arg3) {
13521                 if (target_to_host_itimerspec(&its_new, arg3)) {
13522                     return -TARGET_EFAULT;
13523                 }
13524                 p_new = &its_new;
13525             } else {
13526                 p_new = NULL;
13527             }
13528 
13529             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13530 
13531             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13532                 return -TARGET_EFAULT;
13533             }
13534         }
13535         return ret;
13536 #endif
13537 
13538 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13539     case TARGET_NR_timerfd_settime64:
13540         {
13541             struct itimerspec its_new, its_old, *p_new;
13542 
13543             if (arg3) {
13544                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13545                     return -TARGET_EFAULT;
13546                 }
13547                 p_new = &its_new;
13548             } else {
13549                 p_new = NULL;
13550             }
13551 
13552             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13553 
13554             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13555                 return -TARGET_EFAULT;
13556             }
13557         }
13558         return ret;
13559 #endif
13560 
13561 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13562     case TARGET_NR_ioprio_get:
13563         return get_errno(ioprio_get(arg1, arg2));
13564 #endif
13565 
13566 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13567     case TARGET_NR_ioprio_set:
13568         return get_errno(ioprio_set(arg1, arg2, arg3));
13569 #endif
13570 
13571 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13572     case TARGET_NR_setns:
13573         return get_errno(setns(arg1, arg2));
13574 #endif
13575 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13576     case TARGET_NR_unshare:
13577         return get_errno(unshare(arg1));
13578 #endif
13579 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13580     case TARGET_NR_kcmp:
13581         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13582 #endif
13583 #ifdef TARGET_NR_swapcontext
13584     case TARGET_NR_swapcontext:
13585         /* PowerPC specific.  */
13586         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13587 #endif
13588 #ifdef TARGET_NR_memfd_create
13589     case TARGET_NR_memfd_create:
13590         p = lock_user_string(arg1);
13591         if (!p) {
13592             return -TARGET_EFAULT;
13593         }
13594         ret = get_errno(memfd_create(p, arg2));
13595         fd_trans_unregister(ret);
13596         unlock_user(p, arg1, 0);
13597         return ret;
13598 #endif
13599 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13600     case TARGET_NR_membarrier:
13601         return get_errno(membarrier(arg1, arg2));
13602 #endif
13603 
13604 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13605     case TARGET_NR_copy_file_range:
13606         {
13607             loff_t inoff, outoff;
13608             loff_t *pinoff = NULL, *poutoff = NULL;
13609 
13610             if (arg2) {
13611                 if (get_user_u64(inoff, arg2)) {
13612                     return -TARGET_EFAULT;
13613                 }
13614                 pinoff = &inoff;
13615             }
13616             if (arg4) {
13617                 if (get_user_u64(outoff, arg4)) {
13618                     return -TARGET_EFAULT;
13619                 }
13620                 poutoff = &outoff;
13621             }
13622             /* Do not sign-extend the count parameter. */
13623             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13624                                                  (abi_ulong)arg5, arg6));
13625             if (!is_error(ret) && ret > 0) {
13626                 if (arg2) {
13627                     if (put_user_u64(inoff, arg2)) {
13628                         return -TARGET_EFAULT;
13629                     }
13630                 }
13631                 if (arg4) {
13632                     if (put_user_u64(outoff, arg4)) {
13633                         return -TARGET_EFAULT;
13634                     }
13635                 }
13636             }
13637         }
13638         return ret;
13639 #endif
13640 
13641 #if defined(TARGET_NR_pivot_root)
13642     case TARGET_NR_pivot_root:
13643         {
13644             void *p2;
13645             p = lock_user_string(arg1); /* new_root */
13646             p2 = lock_user_string(arg2); /* put_old */
13647             if (!p || !p2) {
13648                 ret = -TARGET_EFAULT;
13649             } else {
13650                 ret = get_errno(pivot_root(p, p2));
13651             }
13652             unlock_user(p2, arg2, 0);
13653             unlock_user(p, arg1, 0);
13654         }
13655         return ret;
13656 #endif
13657 
13658     default:
13659         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13660         return -TARGET_ENOSYS;
13661     }
13662     return ret;
13663 }
13664 
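/*
 * Entry point called from the per-target cpu_loop()s: record the syscall for
 * plugins, emit -strace logging if enabled, dispatch to do_syscall1() above,
 * then log and record the result.
 */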
13665 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13666                     abi_long arg2, abi_long arg3, abi_long arg4,
13667                     abi_long arg5, abi_long arg6, abi_long arg7,
13668                     abi_long arg8)
13669 {
13670     CPUState *cpu = env_cpu(cpu_env);
13671     abi_long ret;
13672 
13673 #ifdef DEBUG_ERESTARTSYS
13674     /* Debug-only code for exercising the syscall-restart code paths
13675      * in the per-architecture cpu main loops: restart every syscall
13676      * the guest makes once before letting it through.
13677      */
13678     {
13679         static bool flag;
13680         flag = !flag;
13681         if (flag) {
13682             return -QEMU_ERESTARTSYS;
13683         }
13684     }
13685 #endif
13686 
13687     record_syscall_start(cpu, num, arg1,
13688                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13689 
13690     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13691         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13692     }
13693 
13694     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13695                       arg5, arg6, arg7, arg8);
13696 
13697     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13698         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13699                           arg3, arg4, arg5, arg6);
13700     }
13701 
13702     record_syscall_return(cpu, num, ret);
13703     return ret;
13704 }
13705