xref: /openbmc/qemu/linux-user/syscall.c (revision 27404b6c)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 #include "cpu_loop-common.h"
144 
145 #ifndef CLONE_IO
146 #define CLONE_IO                0x80000000      /* Clone io context */
147 #endif
148 
149 /* We can't directly call the host clone syscall, because this will
150  * badly confuse libc (breaking mutexes, for example). So we must
151  * divide clone flags into:
152  *  * flag combinations that look like pthread_create()
153  *  * flag combinations that look like fork()
154  *  * flags we can implement within QEMU itself
155  *  * flags we can't support and will return an error for
156  */
157 /* For thread creation, all these flags must be present; for
158  * fork, none must be present.
159  */
160 #define CLONE_THREAD_FLAGS                              \
161     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
162      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 
164 /* These flags are ignored:
165  * CLONE_DETACHED is now ignored by the kernel;
166  * CLONE_IO is just an optimisation hint to the I/O scheduler
167  */
168 #define CLONE_IGNORED_FLAGS                     \
169     (CLONE_DETACHED | CLONE_IO)
170 
171 /* Flags for fork which we can implement within QEMU itself */
172 #define CLONE_OPTIONAL_FORK_FLAGS               \
173     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
174      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 
176 /* Flags for thread creation which we can implement within QEMU itself */
177 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
178     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
179      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 
181 #define CLONE_INVALID_FORK_FLAGS                                        \
182     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 
184 #define CLONE_INVALID_THREAD_FLAGS                                      \
185     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
186        CLONE_IGNORED_FLAGS))
187 
188 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
189  * have almost all been allocated. We cannot support any of
190  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
191  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
192  * The checks against the invalid thread masks above will catch these.
193  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
194  */
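/* As a concrete example, glibc's pthread_create() typically passes
 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * (the exact set can vary between libc versions): that contains all of
 * CLONE_THREAD_FLAGS plus only optional/ignored bits, so it is handled as
 * thread creation, while a plain fork()-style clone passes none of
 * CLONE_THREAD_FLAGS.
 */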
195 
196 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
197  * once. This exercises the codepaths for restart.
198  */
199 //#define DEBUG_ERESTARTSYS
200 
201 //#include <linux/msdos_fs.h>
202 #define VFAT_IOCTL_READDIR_BOTH \
203     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
204 #define VFAT_IOCTL_READDIR_SHORT \
205     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
206 
207 #undef _syscall0
208 #undef _syscall1
209 #undef _syscall2
210 #undef _syscall3
211 #undef _syscall4
212 #undef _syscall5
213 #undef _syscall6
214 
215 #define _syscall0(type,name)		\
216 static type name (void)			\
217 {					\
218 	return syscall(__NR_##name);	\
219 }
220 
221 #define _syscall1(type,name,type1,arg1)		\
222 static type name (type1 arg1)			\
223 {						\
224 	return syscall(__NR_##name, arg1);	\
225 }
226 
227 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
228 static type name (type1 arg1,type2 arg2)		\
229 {							\
230 	return syscall(__NR_##name, arg1, arg2);	\
231 }
232 
233 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
234 static type name (type1 arg1,type2 arg2,type3 arg3)		\
235 {								\
236 	return syscall(__NR_##name, arg1, arg2, arg3);		\
237 }
238 
239 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
241 {										\
242 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
243 }
244 
245 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
246 		  type5,arg5)							\
247 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
250 }
251 
252 
253 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
254 		  type5,arg5,type6,arg6)					\
255 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
256                   type6 arg6)							\
257 {										\
258 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
259 }
260 
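/* For example, _syscall2(int, capget, struct __user_cap_header_struct *,
 * header, struct __user_cap_data_struct *, data) expands (modulo
 * whitespace) to:
 *
 *   static int capget(struct __user_cap_header_struct *header,
 *                     struct __user_cap_data_struct *data)
 *   {
 *       return syscall(__NR_capget, header, data);
 *   }
 *
 * i.e. each wrapper invokes the raw host syscall directly, bypassing any
 * glibc wrapper (which may not exist, or may not behave the way we need).
 */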
261 
262 #define __NR_sys_uname __NR_uname
263 #define __NR_sys_getcwd1 __NR_getcwd
264 #define __NR_sys_getdents __NR_getdents
265 #define __NR_sys_getdents64 __NR_getdents64
266 #define __NR_sys_getpriority __NR_getpriority
267 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
268 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
269 #define __NR_sys_syslog __NR_syslog
270 #if defined(__NR_futex)
271 # define __NR_sys_futex __NR_futex
272 #endif
273 #if defined(__NR_futex_time64)
274 # define __NR_sys_futex_time64 __NR_futex_time64
275 #endif
276 #define __NR_sys_statx __NR_statx
277 
278 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
279 #define __NR__llseek __NR_lseek
280 #endif
281 
282 /* Newer kernel ports have llseek() instead of _llseek() */
283 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
284 #define TARGET_NR__llseek TARGET_NR_llseek
285 #endif
286 
287 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
288 #ifndef TARGET_O_NONBLOCK_MASK
289 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
290 #endif
291 
292 #define __NR_sys_gettid __NR_gettid
293 _syscall0(int, sys_gettid)
294 
295 /* For the 64-bit guest on 32-bit host case we must emulate
296  * getdents using getdents64, because otherwise the host
297  * might hand us back more dirent records than we can fit
298  * into the guest buffer after structure format conversion.
299  * Otherwise we implement getdents on top of the host's own getdents, if it has one.
300  */
301 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
302 #define EMULATE_GETDENTS_WITH_GETDENTS
303 #endif
304 
305 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
306 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
307 #endif
308 #if (defined(TARGET_NR_getdents) && \
309       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
310     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
311 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
312 #endif
313 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
314 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
315           loff_t *, res, uint, wh);
316 #endif
317 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
318 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
319           siginfo_t *, uinfo)
320 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
321 #ifdef __NR_exit_group
322 _syscall1(int,exit_group,int,error_code)
323 #endif
324 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
325 #define __NR_sys_close_range __NR_close_range
326 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
327 #ifndef CLOSE_RANGE_CLOEXEC
328 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
329 #endif
330 #endif
331 #if defined(__NR_futex)
332 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
333           const struct timespec *,timeout,int *,uaddr2,int,val3)
334 #endif
335 #if defined(__NR_futex_time64)
336 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
337           const struct timespec *,timeout,int *,uaddr2,int,val3)
338 #endif
339 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
340 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
341 #endif
342 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
343 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
344                              unsigned int, flags);
345 #endif
346 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
347 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
348 #endif
349 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
350 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
351           unsigned long *, user_mask_ptr);
352 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
353 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
354           unsigned long *, user_mask_ptr);
355 /* sched_attr is not defined in glibc */
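/* The layout below is intended to mirror the kernel's own struct sched_attr
 * (see include/uapi/linux/sched/types.h in recent kernels); since glibc
 * offers no wrappers for sched_getattr()/sched_setattr(), we invoke the
 * syscalls directly via the _syscall macros.
 */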
356 struct sched_attr {
357     uint32_t size;
358     uint32_t sched_policy;
359     uint64_t sched_flags;
360     int32_t sched_nice;
361     uint32_t sched_priority;
362     uint64_t sched_runtime;
363     uint64_t sched_deadline;
364     uint64_t sched_period;
365     uint32_t sched_util_min;
366     uint32_t sched_util_max;
367 };
368 #define __NR_sys_sched_getattr __NR_sched_getattr
369 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
370           unsigned int, size, unsigned int, flags);
371 #define __NR_sys_sched_setattr __NR_sched_setattr
372 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
373           unsigned int, flags);
374 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
375 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
376 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
377 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
378           const struct sched_param *, param);
379 #define __NR_sys_sched_getparam __NR_sched_getparam
380 _syscall2(int, sys_sched_getparam, pid_t, pid,
381           struct sched_param *, param);
382 #define __NR_sys_sched_setparam __NR_sched_setparam
383 _syscall2(int, sys_sched_setparam, pid_t, pid,
384           const struct sched_param *, param);
385 #define __NR_sys_getcpu __NR_getcpu
386 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
387 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
388           void *, arg);
389 _syscall2(int, capget, struct __user_cap_header_struct *, header,
390           struct __user_cap_data_struct *, data);
391 _syscall2(int, capset, struct __user_cap_header_struct *, header,
392           struct __user_cap_data_struct *, data);
393 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
394 _syscall2(int, ioprio_get, int, which, int, who)
395 #endif
396 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
397 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
398 #endif
399 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
400 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
401 #endif
402 
403 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
404 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
405           unsigned long, idx1, unsigned long, idx2)
406 #endif
407 
408 /*
409  * It is assumed that struct statx is architecture independent.
410  */
411 #if defined(TARGET_NR_statx) && defined(__NR_statx)
412 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
413           unsigned int, mask, struct target_statx *, statxbuf)
414 #endif
415 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
416 _syscall2(int, membarrier, int, cmd, int, flags)
417 #endif
418 
419 static const bitmask_transtbl fcntl_flags_tbl[] = {
420   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
421   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
422   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
423   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
424   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
425   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
426   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
427   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
428   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
429   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
430   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
431   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
432   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
433 #if defined(O_DIRECT)
434   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
435 #endif
436 #if defined(O_NOATIME)
437   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
438 #endif
439 #if defined(O_CLOEXEC)
440   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
441 #endif
442 #if defined(O_PATH)
443   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
444 #endif
445 #if defined(O_TMPFILE)
446   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
447 #endif
448   /* Don't terminate the list prematurely on 64-bit host+guest.  */
449 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
450   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
451 #endif
452   { 0, 0, 0, 0 }
453 };
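/* Each row is { target_mask, target_bits, host_mask, host_bits }: the
 * bitmask_transtbl helpers (target_to_host_bitmask() and
 * host_to_target_bitmask()) scan the table and, for every row where
 * (flags & mask) == bits on the source side, OR in the corresponding bits
 * for the destination side.  So e.g. a guest O_CREAT always becomes the
 * host O_CREAT even when the two constants differ numerically.
 */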
454 
455 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
456 
457 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
458 #if defined(__NR_utimensat)
459 #define __NR_sys_utimensat __NR_utimensat
460 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
461           const struct timespec *,tsp,int,flags)
462 #else
463 static int sys_utimensat(int dirfd, const char *pathname,
464                          const struct timespec times[2], int flags)
465 {
466     errno = ENOSYS;
467     return -1;
468 }
469 #endif
470 #endif /* TARGET_NR_utimensat */
471 
472 #ifdef TARGET_NR_renameat2
473 #if defined(__NR_renameat2)
474 #define __NR_sys_renameat2 __NR_renameat2
475 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
476           const char *, new, unsigned int, flags)
477 #else
478 static int sys_renameat2(int oldfd, const char *old,
479                          int newfd, const char *new, int flags)
480 {
481     if (flags == 0) {
482         return renameat(oldfd, old, newfd, new);
483     }
484     errno = ENOSYS;
485     return -1;
486 }
487 #endif
488 #endif /* TARGET_NR_renameat2 */
489 
490 #ifdef CONFIG_INOTIFY
491 #include <sys/inotify.h>
492 #else
493 /* Userspace can usually survive at runtime without inotify support */
494 #undef TARGET_NR_inotify_init
495 #undef TARGET_NR_inotify_init1
496 #undef TARGET_NR_inotify_add_watch
497 #undef TARGET_NR_inotify_rm_watch
498 #endif /* CONFIG_INOTIFY  */
499 
500 #if defined(TARGET_NR_prlimit64)
501 #ifndef __NR_prlimit64
502 # define __NR_prlimit64 -1
503 #endif
504 #define __NR_sys_prlimit64 __NR_prlimit64
505 /* The glibc rlimit structure may not match the one used by the underlying syscall */
506 struct host_rlimit64 {
507     uint64_t rlim_cur;
508     uint64_t rlim_max;
509 };
510 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
511           const struct host_rlimit64 *, new_limit,
512           struct host_rlimit64 *, old_limit)
513 #endif
514 
515 
516 #if defined(TARGET_NR_timer_create)
517 /* Maximum of 32 active POSIX timers allowed at any one time. */
518 #define GUEST_TIMER_MAX 32
519 static timer_t g_posix_timers[GUEST_TIMER_MAX];
520 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
521 
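/* Slot allocation: next_free_host_timer() claims a slot with an atomic
 * exchange, so two guest threads racing in timer_create() cannot be handed
 * the same host timer; free_host_timer_slot() releases the slot with a
 * store-release so its contents are published before it can be reused.
 */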
522 static inline int next_free_host_timer(void)
523 {
524     int k;
525     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
526         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
527             return k;
528         }
529     }
530     return -1;
531 }
532 
533 static inline void free_host_timer_slot(int id)
534 {
535     qatomic_store_release(g_posix_timer_allocated + id, 0);
536 }
537 #endif
538 
539 static inline int host_to_target_errno(int host_errno)
540 {
541     switch (host_errno) {
542 #define E(X)  case X: return TARGET_##X;
543 #include "errnos.c.inc"
544 #undef E
545     default:
546         return host_errno;
547     }
548 }
549 
550 static inline int target_to_host_errno(int target_errno)
551 {
552     switch (target_errno) {
553 #define E(X)  case TARGET_##X: return X;
554 #include "errnos.c.inc"
555 #undef E
556     default:
557         return target_errno;
558     }
559 }
560 
561 abi_long get_errno(abi_long ret)
562 {
563     if (ret == -1)
564         return -host_to_target_errno(errno);
565     else
566         return ret;
567 }
568 
569 const char *target_strerror(int err)
570 {
571     if (err == QEMU_ERESTARTSYS) {
572         return "To be restarted";
573     }
574     if (err == QEMU_ESIGRETURN) {
575         return "Successful exit from sigreturn";
576     }
577 
578     return strerror(target_to_host_errno(err));
579 }
580 
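/* Helper for syscalls that take an extensible struct: the guest may pass a
 * usize larger than the ksize we understand.  Returns 1 if every byte
 * beyond ksize is zero, 0 if any of them is non-zero, and -TARGET_EFAULT
 * if the guest memory cannot be read; this mirrors the kernel's own
 * check_zeroed_user() convention.
 */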
581 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
582 {
583     int i;
584     uint8_t b;
585     if (usize <= ksize) {
586         return 1;
587     }
588     for (i = ksize; i < usize; i++) {
589         if (get_user_u8(b, addr + i)) {
590             return -TARGET_EFAULT;
591         }
592         if (b != 0) {
593             return 0;
594         }
595     }
596     return 1;
597 }
598 
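/* The safe_syscallN() wrappers expand to safe_syscall() invocations (see
 * user/safe-syscall.h).  Roughly: if a guest signal arrives before the host
 * syscall instruction is actually issued, the syscall is not executed and
 * the wrapper fails with the special QEMU_ERESTARTSYS error instead, so the
 * caller can deliver the signal and then restart the syscall.  They are
 * used for host syscalls that may block.
 */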
599 #define safe_syscall0(type, name) \
600 static type safe_##name(void) \
601 { \
602     return safe_syscall(__NR_##name); \
603 }
604 
605 #define safe_syscall1(type, name, type1, arg1) \
606 static type safe_##name(type1 arg1) \
607 { \
608     return safe_syscall(__NR_##name, arg1); \
609 }
610 
611 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
612 static type safe_##name(type1 arg1, type2 arg2) \
613 { \
614     return safe_syscall(__NR_##name, arg1, arg2); \
615 }
616 
617 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
618 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
621 }
622 
623 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
624     type4, arg4) \
625 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
626 { \
627     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
628 }
629 
630 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
631     type4, arg4, type5, arg5) \
632 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
633     type5 arg5) \
634 { \
635     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
636 }
637 
638 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
639     type4, arg4, type5, arg5, type6, arg6) \
640 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
641     type5 arg5, type6 arg6) \
642 { \
643     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
644 }
645 
646 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
647 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
648 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
649               int, flags, mode_t, mode)
650 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
651 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
652               struct rusage *, rusage)
653 #endif
654 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
655               int, options, struct rusage *, rusage)
656 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
657               char **, argv, char **, envp, int, flags)
658 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
659     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
660 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
661               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
662 #endif
663 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
664 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
665               struct timespec *, tsp, const sigset_t *, sigmask,
666               size_t, sigsetsize)
667 #endif
668 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
669               int, maxevents, int, timeout, const sigset_t *, sigmask,
670               size_t, sigsetsize)
671 #if defined(__NR_futex)
672 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
673               const struct timespec *,timeout,int *,uaddr2,int,val3)
674 #endif
675 #if defined(__NR_futex_time64)
676 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
677               const struct timespec *,timeout,int *,uaddr2,int,val3)
678 #endif
679 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
680 safe_syscall2(int, kill, pid_t, pid, int, sig)
681 safe_syscall2(int, tkill, int, tid, int, sig)
682 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
683 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
684 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
685 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
686               unsigned long, pos_l, unsigned long, pos_h)
687 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
688               unsigned long, pos_l, unsigned long, pos_h)
689 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
690               socklen_t, addrlen)
691 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
692               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
693 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
694               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
695 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
696 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
697 safe_syscall2(int, flock, int, fd, int, operation)
698 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
699 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
700               const struct timespec *, uts, size_t, sigsetsize)
701 #endif
702 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
703               int, flags)
704 #if defined(TARGET_NR_nanosleep)
705 safe_syscall2(int, nanosleep, const struct timespec *, req,
706               struct timespec *, rem)
707 #endif
708 #if defined(TARGET_NR_clock_nanosleep) || \
709     defined(TARGET_NR_clock_nanosleep_time64)
710 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
711               const struct timespec *, req, struct timespec *, rem)
712 #endif
713 #ifdef __NR_ipc
714 #ifdef __s390x__
715 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
716               void *, ptr)
717 #else
718 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
719               void *, ptr, long, fifth)
720 #endif
721 #endif
722 #ifdef __NR_msgsnd
723 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
724               int, flags)
725 #endif
726 #ifdef __NR_msgrcv
727 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
728               long, msgtype, int, flags)
729 #endif
730 #ifdef __NR_semtimedop
731 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
732               unsigned, nsops, const struct timespec *, timeout)
733 #endif
734 #if defined(TARGET_NR_mq_timedsend) || \
735     defined(TARGET_NR_mq_timedsend_time64)
736 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
737               size_t, len, unsigned, prio, const struct timespec *, timeout)
738 #endif
739 #if defined(TARGET_NR_mq_timedreceive) || \
740     defined(TARGET_NR_mq_timedreceive_time64)
741 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
742               size_t, len, unsigned *, prio, const struct timespec *, timeout)
743 #endif
744 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
745 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
746               int, outfd, loff_t *, poutoff, size_t, length,
747               unsigned int, flags)
748 #endif
749 
750 /* We do ioctl like this rather than via safe_syscall3 to preserve the
751  * "third argument might be integer or pointer or not present" behaviour of
752  * the libc function.
753  */
754 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
755 /* Similarly for fcntl. Note that callers must always:
756  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
757  *  - use the flock64 struct rather than the unsuffixed flock
758  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
759  */
760 #ifdef __NR_fcntl64
761 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
762 #else
763 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
764 #endif
765 
766 static inline int host_to_target_sock_type(int host_type)
767 {
768     int target_type;
769 
770     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
771     case SOCK_DGRAM:
772         target_type = TARGET_SOCK_DGRAM;
773         break;
774     case SOCK_STREAM:
775         target_type = TARGET_SOCK_STREAM;
776         break;
777     default:
778         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
779         break;
780     }
781 
782 #if defined(SOCK_CLOEXEC)
783     if (host_type & SOCK_CLOEXEC) {
784         target_type |= TARGET_SOCK_CLOEXEC;
785     }
786 #endif
787 
788 #if defined(SOCK_NONBLOCK)
789     if (host_type & SOCK_NONBLOCK) {
790         target_type |= TARGET_SOCK_NONBLOCK;
791     }
792 #endif
793 
794     return target_type;
795 }
796 
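/* Guest brk state: target_original_brk is the initial break set up by the
 * loader, target_brk is the current guest break value, and brk_page is the
 * host-page-aligned top of the memory actually reserved for the heap;
 * do_brk() only has to mmap more memory when the new break crosses it.
 */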
797 static abi_ulong target_brk;
798 static abi_ulong target_original_brk;
799 static abi_ulong brk_page;
800 
801 void target_set_brk(abi_ulong new_brk)
802 {
803     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
804     brk_page = HOST_PAGE_ALIGN(target_brk);
805 }
806 
807 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
808 #define DEBUGF_BRK(message, args...)
809 
810 /* do_brk() must return target values and target errnos. */
811 abi_long do_brk(abi_ulong new_brk)
812 {
813     abi_long mapped_addr;
814     abi_ulong new_alloc_size;
815 
816     /* brk pointers are always untagged */
817 
818     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
819 
820     if (!new_brk) {
821         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
822         return target_brk;
823     }
824     if (new_brk < target_original_brk) {
825         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
826                    target_brk);
827         return target_brk;
828     }
829 
830     /* If the new brk is less than the highest page reserved to the
831      * target heap allocation, set it and we're almost done...  */
832     if (new_brk <= brk_page) {
833         /* Heap contents are initialized to zero, as for anonymous
834          * mapped pages.  */
835         if (new_brk > target_brk) {
836             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
837         }
838         target_brk = new_brk;
839         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
840         return target_brk;
841     }
842 
843     /* We need to allocate more memory after the brk... Note that
844      * we don't use MAP_FIXED because that will map over the top of
845      * any existing mapping (like the one with the host libc or qemu
846      * itself); instead we treat "mapped but at wrong address" as
847      * a failure and unmap again.
848      */
849     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
850     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
851                                         PROT_READ|PROT_WRITE,
852                                         MAP_ANON|MAP_PRIVATE, 0, 0));
853 
854     if (mapped_addr == brk_page) {
855         /* Heap contents are initialized to zero, as for anonymous
856          * mapped pages.  Technically the new pages are already
857          * initialized to zero since they *are* anonymous mapped
858          * pages, however we have to take care with the contents that
859          * come from the remaining part of the previous page: it may
860          * contain garbage data due to previous heap usage (grown
861          * then shrunk).  */
862         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
863 
864         target_brk = new_brk;
865         brk_page = HOST_PAGE_ALIGN(target_brk);
866         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
867             target_brk);
868         return target_brk;
869     } else if (mapped_addr != -1) {
870         /* Mapped but at wrong address, meaning there wasn't actually
871          * enough space for this brk.
872          */
873         target_munmap(mapped_addr, new_alloc_size);
874         mapped_addr = -1;
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
876     } else {
878         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
879     }
880 
881 #if defined(TARGET_ALPHA)
882     /* We (partially) emulate OSF/1 on Alpha, which requires we
883        return a proper errno, not an unchanged brk value.  */
884     return -TARGET_ENOMEM;
885 #endif
886     /* For everything else, return the previous break. */
887     return target_brk;
888 }
889 
890 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
891     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
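/* The guest fd_set is an array of abi_ulong words in guest endianness and
 * guest word size, so it cannot simply be memcpy'd; the helpers below
 * transcribe it bit by bit into a host fd_set and back again.
 */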
892 static inline abi_long copy_from_user_fdset(fd_set *fds,
893                                             abi_ulong target_fds_addr,
894                                             int n)
895 {
896     int i, nw, j, k;
897     abi_ulong b, *target_fds;
898 
899     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
900     if (!(target_fds = lock_user(VERIFY_READ,
901                                  target_fds_addr,
902                                  sizeof(abi_ulong) * nw,
903                                  1)))
904         return -TARGET_EFAULT;
905 
906     FD_ZERO(fds);
907     k = 0;
908     for (i = 0; i < nw; i++) {
909         /* grab the abi_ulong */
910         __get_user(b, &target_fds[i]);
911         for (j = 0; j < TARGET_ABI_BITS; j++) {
912             /* check the bit inside the abi_ulong */
913             if ((b >> j) & 1)
914                 FD_SET(k, fds);
915             k++;
916         }
917     }
918 
919     unlock_user(target_fds, target_fds_addr, 0);
920 
921     return 0;
922 }
923 
924 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
925                                                  abi_ulong target_fds_addr,
926                                                  int n)
927 {
928     if (target_fds_addr) {
929         if (copy_from_user_fdset(fds, target_fds_addr, n))
930             return -TARGET_EFAULT;
931         *fds_ptr = fds;
932     } else {
933         *fds_ptr = NULL;
934     }
935     return 0;
936 }
937 
938 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
939                                           const fd_set *fds,
940                                           int n)
941 {
942     int i, nw, j, k;
943     abi_long v;
944     abi_ulong *target_fds;
945 
946     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947     if (!(target_fds = lock_user(VERIFY_WRITE,
948                                  target_fds_addr,
949                                  sizeof(abi_ulong) * nw,
950                                  0)))
951         return -TARGET_EFAULT;
952 
953     k = 0;
954     for (i = 0; i < nw; i++) {
955         v = 0;
956         for (j = 0; j < TARGET_ABI_BITS; j++) {
957             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
958             k++;
959         }
960         __put_user(v, &target_fds[i]);
961     }
962 
963     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
964 
965     return 0;
966 }
967 #endif
968 
969 #if defined(__alpha__)
970 #define HOST_HZ 1024
971 #else
972 #define HOST_HZ 100
973 #endif
974 
975 static inline abi_long host_to_target_clock_t(long ticks)
976 {
977 #if HOST_HZ == TARGET_HZ
978     return ticks;
979 #else
980     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
981 #endif
982 }
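/* Worked example (assuming an Alpha host with HOST_HZ 1024 and a target
 * using TARGET_HZ 100): 2048 host ticks scale to (2048 * 100) / 1024 = 200
 * target ticks, so clock_t values reported via times() stay consistent
 * with the guest's idea of the tick rate.
 */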
983 
984 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
985                                              const struct rusage *rusage)
986 {
987     struct target_rusage *target_rusage;
988 
989     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
990         return -TARGET_EFAULT;
991     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
992     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
993     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
994     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
995     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
996     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
997     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
998     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
999     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1000     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1001     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1002     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1003     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1004     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1005     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1006     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1007     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1008     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1009     unlock_user_struct(target_rusage, target_addr, 1);
1010 
1011     return 0;
1012 }
1013 
1014 #ifdef TARGET_NR_setrlimit
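/* Convert a guest rlimit value to the host rlim_t, degrading anything that
 * does not round-trip (including the guest's RLIM_INFINITY encoding) to
 * RLIM_INFINITY rather than silently truncating it.
 */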
1015 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1016 {
1017     abi_ulong target_rlim_swap;
1018     rlim_t result;
1019 
1020     target_rlim_swap = tswapal(target_rlim);
1021     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1022         return RLIM_INFINITY;
1023 
1024     result = target_rlim_swap;
1025     if (target_rlim_swap != (rlim_t)result)
1026         return RLIM_INFINITY;
1027 
1028     return result;
1029 }
1030 #endif
1031 
1032 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1033 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1034 {
1035     abi_ulong target_rlim_swap;
1036     abi_ulong result;
1037 
1038     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1039         target_rlim_swap = TARGET_RLIM_INFINITY;
1040     else
1041         target_rlim_swap = rlim;
1042     result = tswapal(target_rlim_swap);
1043 
1044     return result;
1045 }
1046 #endif
1047 
1048 static inline int target_to_host_resource(int code)
1049 {
1050     switch (code) {
1051     case TARGET_RLIMIT_AS:
1052         return RLIMIT_AS;
1053     case TARGET_RLIMIT_CORE:
1054         return RLIMIT_CORE;
1055     case TARGET_RLIMIT_CPU:
1056         return RLIMIT_CPU;
1057     case TARGET_RLIMIT_DATA:
1058         return RLIMIT_DATA;
1059     case TARGET_RLIMIT_FSIZE:
1060         return RLIMIT_FSIZE;
1061     case TARGET_RLIMIT_LOCKS:
1062         return RLIMIT_LOCKS;
1063     case TARGET_RLIMIT_MEMLOCK:
1064         return RLIMIT_MEMLOCK;
1065     case TARGET_RLIMIT_MSGQUEUE:
1066         return RLIMIT_MSGQUEUE;
1067     case TARGET_RLIMIT_NICE:
1068         return RLIMIT_NICE;
1069     case TARGET_RLIMIT_NOFILE:
1070         return RLIMIT_NOFILE;
1071     case TARGET_RLIMIT_NPROC:
1072         return RLIMIT_NPROC;
1073     case TARGET_RLIMIT_RSS:
1074         return RLIMIT_RSS;
1075     case TARGET_RLIMIT_RTPRIO:
1076         return RLIMIT_RTPRIO;
1077 #ifdef RLIMIT_RTTIME
1078     case TARGET_RLIMIT_RTTIME:
1079         return RLIMIT_RTTIME;
1080 #endif
1081     case TARGET_RLIMIT_SIGPENDING:
1082         return RLIMIT_SIGPENDING;
1083     case TARGET_RLIMIT_STACK:
1084         return RLIMIT_STACK;
1085     default:
1086         return code;
1087     }
1088 }
1089 
1090 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1091                                               abi_ulong target_tv_addr)
1092 {
1093     struct target_timeval *target_tv;
1094 
1095     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1096         return -TARGET_EFAULT;
1097     }
1098 
1099     __get_user(tv->tv_sec, &target_tv->tv_sec);
1100     __get_user(tv->tv_usec, &target_tv->tv_usec);
1101 
1102     unlock_user_struct(target_tv, target_tv_addr, 0);
1103 
1104     return 0;
1105 }
1106 
1107 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1108                                             const struct timeval *tv)
1109 {
1110     struct target_timeval *target_tv;
1111 
1112     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1113         return -TARGET_EFAULT;
1114     }
1115 
1116     __put_user(tv->tv_sec, &target_tv->tv_sec);
1117     __put_user(tv->tv_usec, &target_tv->tv_usec);
1118 
1119     unlock_user_struct(target_tv, target_tv_addr, 1);
1120 
1121     return 0;
1122 }
1123 
1124 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1125 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1126                                                 abi_ulong target_tv_addr)
1127 {
1128     struct target__kernel_sock_timeval *target_tv;
1129 
1130     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1131         return -TARGET_EFAULT;
1132     }
1133 
1134     __get_user(tv->tv_sec, &target_tv->tv_sec);
1135     __get_user(tv->tv_usec, &target_tv->tv_usec);
1136 
1137     unlock_user_struct(target_tv, target_tv_addr, 0);
1138 
1139     return 0;
1140 }
1141 #endif
1142 
1143 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1144                                               const struct timeval *tv)
1145 {
1146     struct target__kernel_sock_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1149         return -TARGET_EFAULT;
1150     }
1151 
1152     __put_user(tv->tv_sec, &target_tv->tv_sec);
1153     __put_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 1);
1156 
1157     return 0;
1158 }
1159 
1160 #if defined(TARGET_NR_futex) || \
1161     defined(TARGET_NR_rt_sigtimedwait) || \
1162     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1163     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1164     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1165     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1166     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1167     defined(TARGET_NR_timer_settime) || \
1168     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1169 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1170                                                abi_ulong target_addr)
1171 {
1172     struct target_timespec *target_ts;
1173 
1174     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1175         return -TARGET_EFAULT;
1176     }
1177     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1178     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179     unlock_user_struct(target_ts, target_addr, 0);
1180     return 0;
1181 }
1182 #endif
1183 
1184 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1185     defined(TARGET_NR_timer_settime64) || \
1186     defined(TARGET_NR_mq_timedsend_time64) || \
1187     defined(TARGET_NR_mq_timedreceive_time64) || \
1188     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1189     defined(TARGET_NR_clock_nanosleep_time64) || \
1190     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1191     defined(TARGET_NR_utimensat) || \
1192     defined(TARGET_NR_utimensat_time64) || \
1193     defined(TARGET_NR_semtimedop_time64) || \
1194     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1195 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1196                                                  abi_ulong target_addr)
1197 {
1198     struct target__kernel_timespec *target_ts;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1204     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1205     /* in 32bit mode, this drops the padding */
1206     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1207     unlock_user_struct(target_ts, target_addr, 0);
1208     return 0;
1209 }
1210 #endif
1211 
1212 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1213                                                struct timespec *host_ts)
1214 {
1215     struct target_timespec *target_ts;
1216 
1217     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1218         return -TARGET_EFAULT;
1219     }
1220     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1221     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1222     unlock_user_struct(target_ts, target_addr, 1);
1223     return 0;
1224 }
1225 
1226 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1227                                                  struct timespec *host_ts)
1228 {
1229     struct target__kernel_timespec *target_ts;
1230 
1231     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1232         return -TARGET_EFAULT;
1233     }
1234     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1235     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236     unlock_user_struct(target_ts, target_addr, 1);
1237     return 0;
1238 }
1239 
1240 #if defined(TARGET_NR_gettimeofday)
1241 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1242                                              struct timezone *tz)
1243 {
1244     struct target_timezone *target_tz;
1245 
1246     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1247         return -TARGET_EFAULT;
1248     }
1249 
1250     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1251     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1252 
1253     unlock_user_struct(target_tz, target_tz_addr, 1);
1254 
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_settimeofday)
1260 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1261                                                abi_ulong target_tz_addr)
1262 {
1263     struct target_timezone *target_tz;
1264 
1265     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1266         return -TARGET_EFAULT;
1267     }
1268 
1269     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1270     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1271 
1272     unlock_user_struct(target_tz, target_tz_addr, 0);
1273 
1274     return 0;
1275 }
1276 #endif
1277 
1278 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1279 #include <mqueue.h>
1280 
1281 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1282                                               abi_ulong target_mq_attr_addr)
1283 {
1284     struct target_mq_attr *target_mq_attr;
1285 
1286     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1287                           target_mq_attr_addr, 1))
1288         return -TARGET_EFAULT;
1289 
1290     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1291     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1292     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1293     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1294 
1295     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1296 
1297     return 0;
1298 }
1299 
1300 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1301                                             const struct mq_attr *attr)
1302 {
1303     struct target_mq_attr *target_mq_attr;
1304 
1305     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1306                           target_mq_attr_addr, 0))
1307         return -TARGET_EFAULT;
1308 
1309     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1310     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1311     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1312     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1313 
1314     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1315 
1316     return 0;
1317 }
1318 #endif
1319 
1320 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1321 /* do_select() must return target values and target errnos. */
1322 static abi_long do_select(int n,
1323                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1324                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1325 {
1326     fd_set rfds, wfds, efds;
1327     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1328     struct timeval tv;
1329     struct timespec ts, *ts_ptr;
1330     abi_long ret;
1331 
1332     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1333     if (ret) {
1334         return ret;
1335     }
1336     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1337     if (ret) {
1338         return ret;
1339     }
1340     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1341     if (ret) {
1342         return ret;
1343     }
1344 
1345     if (target_tv_addr) {
1346         if (copy_from_user_timeval(&tv, target_tv_addr))
1347             return -TARGET_EFAULT;
1348         ts.tv_sec = tv.tv_sec;
1349         ts.tv_nsec = tv.tv_usec * 1000;
1350         ts_ptr = &ts;
1351     } else {
1352         ts_ptr = NULL;
1353     }
1354 
1355     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1356                                   ts_ptr, NULL));
1357 
1358     if (!is_error(ret)) {
1359         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1360             return -TARGET_EFAULT;
1361         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1362             return -TARGET_EFAULT;
1363         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1364             return -TARGET_EFAULT;
1365 
1366         if (target_tv_addr) {
1367             tv.tv_sec = ts.tv_sec;
1368             tv.tv_usec = ts.tv_nsec / 1000;
1369             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1370                 return -TARGET_EFAULT;
1371             }
1372         }
1373     }
1374 
1375     return ret;
1376 }
1377 
1378 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1379 static abi_long do_old_select(abi_ulong arg1)
1380 {
1381     struct target_sel_arg_struct *sel;
1382     abi_ulong inp, outp, exp, tvp;
1383     long nsel;
1384 
1385     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1386         return -TARGET_EFAULT;
1387     }
1388 
1389     nsel = tswapal(sel->n);
1390     inp = tswapal(sel->inp);
1391     outp = tswapal(sel->outp);
1392     exp = tswapal(sel->exp);
1393     tvp = tswapal(sel->tvp);
1394 
1395     unlock_user_struct(sel, arg1, 0);
1396 
1397     return do_select(nsel, inp, outp, exp, tvp);
1398 }
1399 #endif
1400 #endif
1401 
1402 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1403 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1404                             abi_long arg4, abi_long arg5, abi_long arg6,
1405                             bool time64)
1406 {
1407     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1408     fd_set rfds, wfds, efds;
1409     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1410     struct timespec ts, *ts_ptr;
1411     abi_long ret;
1412 
1413     /*
1414      * The 6th arg is actually two args smashed together,
1415      * so we cannot use the C library.
1416      */
1417     struct {
1418         sigset_t *set;
1419         size_t size;
1420     } sig, *sig_ptr;
1421 
1422     abi_ulong arg_sigset, arg_sigsize, *arg7;
1423 
1424     n = arg1;
1425     rfd_addr = arg2;
1426     wfd_addr = arg3;
1427     efd_addr = arg4;
1428     ts_addr = arg5;
1429 
1430     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1431     if (ret) {
1432         return ret;
1433     }
1434     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1435     if (ret) {
1436         return ret;
1437     }
1438     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1439     if (ret) {
1440         return ret;
1441     }
1442 
1443     /*
1444      * This takes a timespec, and not a timeval, so we cannot
1445      * use the do_select() helper ...
1446      */
1447     if (ts_addr) {
1448         if (time64) {
1449             if (target_to_host_timespec64(&ts, ts_addr)) {
1450                 return -TARGET_EFAULT;
1451             }
1452         } else {
1453             if (target_to_host_timespec(&ts, ts_addr)) {
1454                 return -TARGET_EFAULT;
1455             }
1456         }
1457         ts_ptr = &ts;
1458     } else {
1459         ts_ptr = NULL;
1460     }
1461 
1462     /* Extract the two packed args for the sigset */
1463     sig_ptr = NULL;
1464     if (arg6) {
1465         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1466         if (!arg7) {
1467             return -TARGET_EFAULT;
1468         }
1469         arg_sigset = tswapal(arg7[0]);
1470         arg_sigsize = tswapal(arg7[1]);
1471         unlock_user(arg7, arg6, 0);
1472 
1473         if (arg_sigset) {
1474             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1475             if (ret != 0) {
1476                 return ret;
1477             }
1478             sig_ptr = &sig;
1479             sig.size = SIGSET_T_SIZE;
1480         }
1481     }
1482 
1483     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1484                                   ts_ptr, sig_ptr));
1485 
1486     if (sig_ptr) {
1487         finish_sigsuspend_mask(ret);
1488     }
1489 
1490     if (!is_error(ret)) {
1491         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1492             return -TARGET_EFAULT;
1493         }
1494         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1495             return -TARGET_EFAULT;
1496         }
1497         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (time64) {
1501             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1502                 return -TARGET_EFAULT;
1503             }
1504         } else {
1505             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1506                 return -TARGET_EFAULT;
1507             }
1508         }
1509     }
1510     return ret;
1511 }
1512 #endif
1513 
1514 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1515     defined(TARGET_NR_ppoll_time64)
1516 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1517                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1518 {
1519     struct target_pollfd *target_pfd;
1520     unsigned int nfds = arg2;
1521     struct pollfd *pfd;
1522     unsigned int i;
1523     abi_long ret;
1524 
1525     pfd = NULL;
1526     target_pfd = NULL;
1527     if (nfds) {
1528         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1529             return -TARGET_EINVAL;
1530         }
1531         target_pfd = lock_user(VERIFY_WRITE, arg1,
1532                                sizeof(struct target_pollfd) * nfds, 1);
1533         if (!target_pfd) {
1534             return -TARGET_EFAULT;
1535         }
1536 
1537         pfd = alloca(sizeof(struct pollfd) * nfds);
1538         for (i = 0; i < nfds; i++) {
1539             pfd[i].fd = tswap32(target_pfd[i].fd);
1540             pfd[i].events = tswap16(target_pfd[i].events);
1541         }
1542     }
1543     if (ppoll) {
1544         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1545         sigset_t *set = NULL;
1546 
1547         if (arg3) {
1548             if (time64) {
1549                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1550                     unlock_user(target_pfd, arg1, 0);
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (target_to_host_timespec(timeout_ts, arg3)) {
1555                     unlock_user(target_pfd, arg1, 0);
1556                     return -TARGET_EFAULT;
1557                 }
1558             }
1559         } else {
1560             timeout_ts = NULL;
1561         }
1562 
1563         if (arg4) {
1564             ret = process_sigsuspend_mask(&set, arg4, arg5);
1565             if (ret != 0) {
1566                 unlock_user(target_pfd, arg1, 0);
1567                 return ret;
1568             }
1569         }
1570 
1571         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1572                                    set, SIGSET_T_SIZE));
1573 
1574         if (set) {
1575             finish_sigsuspend_mask(ret);
1576         }
1577         if (!is_error(ret) && arg3) {
1578             if (time64) {
1579                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1580                     return -TARGET_EFAULT;
1581                 }
1582             } else {
1583                 if (host_to_target_timespec(arg3, timeout_ts)) {
1584                     return -TARGET_EFAULT;
1585                 }
1586             }
1587         }
1588     } else {
1589         struct timespec ts, *pts;
1590 
1591         if (arg3 >= 0) {
1592             /* Convert ms to secs, ns */
1593             ts.tv_sec = arg3 / 1000;
1594             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1595             pts = &ts;
1596         } else {
1597             /* A negative poll() timeout means "infinite" */
1598             pts = NULL;
1599         }
1600         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1601     }
1602 
1603     if (!is_error(ret)) {
1604         for (i = 0; i < nfds; i++) {
1605             target_pfd[i].revents = tswap16(pfd[i].revents);
1606         }
1607     }
1608     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1609     return ret;
1610 }
1611 #endif
1612 
1613 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1614                         int flags, int is_pipe2)
1615 {
1616     int host_pipe[2];
1617     abi_long ret;
1618     ret = pipe2(host_pipe, flags);
1619 
1620     if (is_error(ret))
1621         return get_errno(ret);
1622 
1623     /* Several targets have special calling conventions for the original
1624        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1625     if (!is_pipe2) {
1626 #if defined(TARGET_ALPHA)
1627         cpu_env->ir[IR_A4] = host_pipe[1];
1628         return host_pipe[0];
1629 #elif defined(TARGET_MIPS)
1630         cpu_env->active_tc.gpr[3] = host_pipe[1];
1631         return host_pipe[0];
1632 #elif defined(TARGET_SH4)
1633         cpu_env->gregs[1] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_SPARC)
1636         cpu_env->regwptr[1] = host_pipe[1];
1637         return host_pipe[0];
1638 #endif
1639     }
1640 
1641     if (put_user_s32(host_pipe[0], pipedes)
1642         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1643         return -TARGET_EFAULT;
1644     return get_errno(ret);
1645 }
1646 
1647 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1648                                               abi_ulong target_addr,
1649                                               socklen_t len)
1650 {
1651     struct target_ip_mreqn *target_smreqn;
1652 
1653     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1654     if (!target_smreqn)
1655         return -TARGET_EFAULT;
1656     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1657     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1658     if (len == sizeof(struct target_ip_mreqn))
1659         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1660     unlock_user(target_smreqn, target_addr, 0);
1661 
1662     return 0;
1663 }
1664 
1665 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1666                                                abi_ulong target_addr,
1667                                                socklen_t len)
1668 {
1669     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1670     sa_family_t sa_family;
1671     struct target_sockaddr *target_saddr;
1672 
1673     if (fd_trans_target_to_host_addr(fd)) {
1674         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1675     }
1676 
1677     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1678     if (!target_saddr)
1679         return -TARGET_EFAULT;
1680 
1681     sa_family = tswap16(target_saddr->sa_family);
1682 
1683     /* Oops. The caller might send an incomplete sun_path; sun_path
1684      * must be terminated by \0 (see the manual page), but
1685      * unfortunately it is quite common to specify sockaddr_un
1686      * length as "strlen(x->sun_path)" while it should be
1687      * "strlen(...) + 1". We'll fix that here if needed.
1688      * The Linux kernel has a similar workaround.
1689      */
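    /*
     * Illustrative example (editorial, not upstream text): if the guest
     * passes sun_path = "/tmp/sock" with
     * len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock"),
     * the check below sees a non-NUL byte at cp[len - 1] followed by a NUL
     * at cp[len] and bumps len by one, so the host kernel receives a
     * properly terminated path.
     */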
1690 
1691     if (sa_family == AF_UNIX) {
1692         if (len < unix_maxlen && len > 0) {
1693             char *cp = (char *)target_saddr;
1694 
1695             if (cp[len - 1] && !cp[len])
1696                 len++;
1697         }
1698         if (len > unix_maxlen)
1699             len = unix_maxlen;
1700     }
1701 
1702     memcpy(addr, target_saddr, len);
1703     addr->sa_family = sa_family;
1704     if (sa_family == AF_NETLINK) {
1705         struct sockaddr_nl *nladdr;
1706 
1707         nladdr = (struct sockaddr_nl *)addr;
1708         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1709         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1710     } else if (sa_family == AF_PACKET) {
1711         struct target_sockaddr_ll *lladdr;
1712 
1713         lladdr = (struct target_sockaddr_ll *)addr;
1714         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1715         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1716     }
1717     unlock_user(target_saddr, target_addr, 0);
1718 
1719     return 0;
1720 }
1721 
1722 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1723                                                struct sockaddr *addr,
1724                                                socklen_t len)
1725 {
1726     struct target_sockaddr *target_saddr;
1727 
1728     if (len == 0) {
1729         return 0;
1730     }
1731     assert(addr);
1732 
1733     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1734     if (!target_saddr)
1735         return -TARGET_EFAULT;
1736     memcpy(target_saddr, addr, len);
1737     if (len >= offsetof(struct target_sockaddr, sa_family) +
1738         sizeof(target_saddr->sa_family)) {
1739         target_saddr->sa_family = tswap16(addr->sa_family);
1740     }
1741     if (addr->sa_family == AF_NETLINK &&
1742         len >= sizeof(struct target_sockaddr_nl)) {
1743         struct target_sockaddr_nl *target_nl =
1744                (struct target_sockaddr_nl *)target_saddr;
1745         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1746         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1747     } else if (addr->sa_family == AF_PACKET) {
1748         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1749         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1750         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1751     } else if (addr->sa_family == AF_INET6 &&
1752                len >= sizeof(struct target_sockaddr_in6)) {
1753         struct target_sockaddr_in6 *target_in6 =
1754                (struct target_sockaddr_in6 *)target_saddr;
1755         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1756     }
1757     unlock_user(target_saddr, target_addr, len);
1758 
1759     return 0;
1760 }
1761 
1762 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1763                                            struct target_msghdr *target_msgh)
1764 {
1765     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1766     abi_long msg_controllen;
1767     abi_ulong target_cmsg_addr;
1768     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1769     socklen_t space = 0;
1770 
1771     msg_controllen = tswapal(target_msgh->msg_controllen);
1772     if (msg_controllen < sizeof (struct target_cmsghdr))
1773         goto the_end;
1774     target_cmsg_addr = tswapal(target_msgh->msg_control);
1775     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1776     target_cmsg_start = target_cmsg;
1777     if (!target_cmsg)
1778         return -TARGET_EFAULT;
1779 
1780     while (cmsg && target_cmsg) {
1781         void *data = CMSG_DATA(cmsg);
1782         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1783 
1784         int len = tswapal(target_cmsg->cmsg_len)
1785             - sizeof(struct target_cmsghdr);
1786 
1787         space += CMSG_SPACE(len);
1788         if (space > msgh->msg_controllen) {
1789             space -= CMSG_SPACE(len);
1790             /* This is a QEMU bug, since we allocated the payload
1791              * area ourselves (unlike overflow in host-to-target
1792              * conversion, which is just the guest giving us a buffer
1793              * that's too small). It can't happen for the payload types
1794              * we currently support; if it becomes an issue in future
1795              * we would need to improve our allocation strategy to
1796              * something more intelligent than "twice the size of the
1797              * target buffer we're reading from".
1798              */
1799             qemu_log_mask(LOG_UNIMP,
1800                           ("Unsupported ancillary data %d/%d: "
1801                            "unhandled msg size\n"),
1802                           tswap32(target_cmsg->cmsg_level),
1803                           tswap32(target_cmsg->cmsg_type));
1804             break;
1805         }
1806 
1807         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1808             cmsg->cmsg_level = SOL_SOCKET;
1809         } else {
1810             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1811         }
1812         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1813         cmsg->cmsg_len = CMSG_LEN(len);
1814 
1815         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1816             int *fd = (int *)data;
1817             int *target_fd = (int *)target_data;
1818             int i, numfds = len / sizeof(int);
1819 
1820             for (i = 0; i < numfds; i++) {
1821                 __get_user(fd[i], target_fd + i);
1822             }
1823         } else if (cmsg->cmsg_level == SOL_SOCKET
1824                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1825             struct ucred *cred = (struct ucred *)data;
1826             struct target_ucred *target_cred =
1827                 (struct target_ucred *)target_data;
1828 
1829             __get_user(cred->pid, &target_cred->pid);
1830             __get_user(cred->uid, &target_cred->uid);
1831             __get_user(cred->gid, &target_cred->gid);
1832         } else if (cmsg->cmsg_level == SOL_ALG) {
1833             uint32_t *dst = (uint32_t *)data;
1834 
1835             memcpy(dst, target_data, len);
1836             /* fix endianness of the first 32-bit word */
1837             if (len >= sizeof(uint32_t)) {
1838                 *dst = tswap32(*dst);
1839             }
1840         } else {
1841             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1842                           cmsg->cmsg_level, cmsg->cmsg_type);
1843             memcpy(data, target_data, len);
1844         }
1845 
1846         cmsg = CMSG_NXTHDR(msgh, cmsg);
1847         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1848                                          target_cmsg_start);
1849     }
1850     unlock_user(target_cmsg, target_cmsg_addr, 0);
1851  the_end:
1852     msgh->msg_controllen = space;
1853     return 0;
1854 }
1855 
1856 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1857                                            struct msghdr *msgh)
1858 {
1859     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1860     abi_long msg_controllen;
1861     abi_ulong target_cmsg_addr;
1862     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1863     socklen_t space = 0;
1864 
1865     msg_controllen = tswapal(target_msgh->msg_controllen);
1866     if (msg_controllen < sizeof (struct target_cmsghdr))
1867         goto the_end;
1868     target_cmsg_addr = tswapal(target_msgh->msg_control);
1869     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1870     target_cmsg_start = target_cmsg;
1871     if (!target_cmsg)
1872         return -TARGET_EFAULT;
1873 
1874     while (cmsg && target_cmsg) {
1875         void *data = CMSG_DATA(cmsg);
1876         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1877 
1878         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1879         int tgt_len, tgt_space;
1880 
1881         /* We never copy a half-header but may copy half-data;
1882          * this is Linux's behaviour in put_cmsg(). Note that
1883          * truncation here is a guest problem (which we report
1884          * to the guest via the CTRUNC bit), unlike truncation
1885          * in target_to_host_cmsg, which is a QEMU bug.
1886          */
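        /*
         * Illustrative example (editorial, not upstream text): if the guest
         * supplied a control buffer with room for only one target_cmsghdr
         * but the host kernel returned two messages, the second header is
         * dropped here and the guest observes MSG_CTRUNC, matching native
         * kernel behaviour.
         */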
1887         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1888             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1889             break;
1890         }
1891 
1892         if (cmsg->cmsg_level == SOL_SOCKET) {
1893             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1894         } else {
1895             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1896         }
1897         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1898 
1899         /* Payload types which need a different size of payload on
1900          * the target must adjust tgt_len here.
1901          */
1902         tgt_len = len;
1903         switch (cmsg->cmsg_level) {
1904         case SOL_SOCKET:
1905             switch (cmsg->cmsg_type) {
1906             case SO_TIMESTAMP:
1907                 tgt_len = sizeof(struct target_timeval);
1908                 break;
1909             default:
1910                 break;
1911             }
1912             break;
1913         default:
1914             break;
1915         }
1916 
1917         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1918             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1919             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1920         }
1921 
1922         /* We must now copy-and-convert len bytes of payload
1923          * into tgt_len bytes of destination space. Bear in mind
1924          * that in both source and destination we may be dealing
1925          * with a truncated value!
1926          */
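        /*
         * Illustrative example (editorial, not upstream text): for
         * SO_TIMESTAMP with a 64-bit host and a 32-bit guest, len is
         * typically sizeof(struct timeval) == 16 while tgt_len is
         * sizeof(struct target_timeval) == 8, so a plain memcpy() would
         * not produce a valid target-side value.
         */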
1927         switch (cmsg->cmsg_level) {
1928         case SOL_SOCKET:
1929             switch (cmsg->cmsg_type) {
1930             case SCM_RIGHTS:
1931             {
1932                 int *fd = (int *)data;
1933                 int *target_fd = (int *)target_data;
1934                 int i, numfds = tgt_len / sizeof(int);
1935 
1936                 for (i = 0; i < numfds; i++) {
1937                     __put_user(fd[i], target_fd + i);
1938                 }
1939                 break;
1940             }
1941             case SO_TIMESTAMP:
1942             {
1943                 struct timeval *tv = (struct timeval *)data;
1944                 struct target_timeval *target_tv =
1945                     (struct target_timeval *)target_data;
1946 
1947                 if (len != sizeof(struct timeval) ||
1948                     tgt_len != sizeof(struct target_timeval)) {
1949                     goto unimplemented;
1950                 }
1951 
1952                 /* copy struct timeval to target */
1953                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1954                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1955                 break;
1956             }
1957             case SCM_CREDENTIALS:
1958             {
1959                 struct ucred *cred = (struct ucred *)data;
1960                 struct target_ucred *target_cred =
1961                     (struct target_ucred *)target_data;
1962 
1963                 __put_user(cred->pid, &target_cred->pid);
1964                 __put_user(cred->uid, &target_cred->uid);
1965                 __put_user(cred->gid, &target_cred->gid);
1966                 break;
1967             }
1968             default:
1969                 goto unimplemented;
1970             }
1971             break;
1972 
1973         case SOL_IP:
1974             switch (cmsg->cmsg_type) {
1975             case IP_TTL:
1976             {
1977                 uint32_t *v = (uint32_t *)data;
1978                 uint32_t *t_int = (uint32_t *)target_data;
1979 
1980                 if (len != sizeof(uint32_t) ||
1981                     tgt_len != sizeof(uint32_t)) {
1982                     goto unimplemented;
1983                 }
1984                 __put_user(*v, t_int);
1985                 break;
1986             }
1987             case IP_RECVERR:
1988             {
1989                 struct errhdr_t {
1990                    struct sock_extended_err ee;
1991                    struct sockaddr_in offender;
1992                 };
1993                 struct errhdr_t *errh = (struct errhdr_t *)data;
1994                 struct errhdr_t *target_errh =
1995                     (struct errhdr_t *)target_data;
1996 
1997                 if (len != sizeof(struct errhdr_t) ||
1998                     tgt_len != sizeof(struct errhdr_t)) {
1999                     goto unimplemented;
2000                 }
2001                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2002                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2003                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2004                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2005                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2006                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2007                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2008                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2009                     (void *) &errh->offender, sizeof(errh->offender));
2010                 break;
2011             }
2012             default:
2013                 goto unimplemented;
2014             }
2015             break;
2016 
2017         case SOL_IPV6:
2018             switch (cmsg->cmsg_type) {
2019             case IPV6_HOPLIMIT:
2020             {
2021                 uint32_t *v = (uint32_t *)data;
2022                 uint32_t *t_int = (uint32_t *)target_data;
2023 
2024                 if (len != sizeof(uint32_t) ||
2025                     tgt_len != sizeof(uint32_t)) {
2026                     goto unimplemented;
2027                 }
2028                 __put_user(*v, t_int);
2029                 break;
2030             }
2031             case IPV6_RECVERR:
2032             {
2033                 struct errhdr6_t {
2034                    struct sock_extended_err ee;
2035                    struct sockaddr_in6 offender;
2036                 };
2037                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2038                 struct errhdr6_t *target_errh =
2039                     (struct errhdr6_t *)target_data;
2040 
2041                 if (len != sizeof(struct errhdr6_t) ||
2042                     tgt_len != sizeof(struct errhdr6_t)) {
2043                     goto unimplemented;
2044                 }
2045                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2046                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2047                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2048                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2049                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2050                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2051                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2052                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2053                     (void *) &errh->offender, sizeof(errh->offender));
2054                 break;
2055             }
2056             default:
2057                 goto unimplemented;
2058             }
2059             break;
2060 
2061         default:
2062         unimplemented:
2063             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2064                           cmsg->cmsg_level, cmsg->cmsg_type);
2065             memcpy(target_data, data, MIN(len, tgt_len));
2066             if (tgt_len > len) {
2067                 memset(target_data + len, 0, tgt_len - len);
2068             }
2069         }
2070 
2071         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2072         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2073         if (msg_controllen < tgt_space) {
2074             tgt_space = msg_controllen;
2075         }
2076         msg_controllen -= tgt_space;
2077         space += tgt_space;
2078         cmsg = CMSG_NXTHDR(msgh, cmsg);
2079         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2080                                          target_cmsg_start);
2081     }
2082     unlock_user(target_cmsg, target_cmsg_addr, space);
2083  the_end:
2084     target_msgh->msg_controllen = tswapal(space);
2085     return 0;
2086 }
2087 
2088 /* do_setsockopt() Must return target values and target errnos. */
2089 static abi_long do_setsockopt(int sockfd, int level, int optname,
2090                               abi_ulong optval_addr, socklen_t optlen)
2091 {
2092     abi_long ret;
2093     int val;
2094     struct ip_mreqn *ip_mreq;
2095     struct ip_mreq_source *ip_mreq_source;
2096 
2097     switch(level) {
2098     case SOL_TCP:
2099     case SOL_UDP:
2100         /* TCP and UDP options all take an 'int' value.  */
2101         if (optlen < sizeof(uint32_t))
2102             return -TARGET_EINVAL;
2103 
2104         if (get_user_u32(val, optval_addr))
2105             return -TARGET_EFAULT;
2106         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2107         break;
2108     case SOL_IP:
2109         switch(optname) {
2110         case IP_TOS:
2111         case IP_TTL:
2112         case IP_HDRINCL:
2113         case IP_ROUTER_ALERT:
2114         case IP_RECVOPTS:
2115         case IP_RETOPTS:
2116         case IP_PKTINFO:
2117         case IP_MTU_DISCOVER:
2118         case IP_RECVERR:
2119         case IP_RECVTTL:
2120         case IP_RECVTOS:
2121 #ifdef IP_FREEBIND
2122         case IP_FREEBIND:
2123 #endif
2124         case IP_MULTICAST_TTL:
2125         case IP_MULTICAST_LOOP:
2126             val = 0;
2127             if (optlen >= sizeof(uint32_t)) {
2128                 if (get_user_u32(val, optval_addr))
2129                     return -TARGET_EFAULT;
2130             } else if (optlen >= 1) {
2131                 if (get_user_u8(val, optval_addr))
2132                     return -TARGET_EFAULT;
2133             }
2134             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2135             break;
2136         case IP_ADD_MEMBERSHIP:
2137         case IP_DROP_MEMBERSHIP:
2138             if (optlen < sizeof (struct target_ip_mreq) ||
2139                 optlen > sizeof (struct target_ip_mreqn))
2140                 return -TARGET_EINVAL;
2141 
2142             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2143             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2144             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2145             break;
2146 
2147         case IP_BLOCK_SOURCE:
2148         case IP_UNBLOCK_SOURCE:
2149         case IP_ADD_SOURCE_MEMBERSHIP:
2150         case IP_DROP_SOURCE_MEMBERSHIP:
2151             if (optlen != sizeof (struct target_ip_mreq_source))
2152                 return -TARGET_EINVAL;
2153 
2154             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2155             if (!ip_mreq_source) {
2156                 return -TARGET_EFAULT;
2157             }
2158             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2159             unlock_user(ip_mreq_source, optval_addr, 0);
2160             break;
2161 
2162         default:
2163             goto unimplemented;
2164         }
2165         break;
2166     case SOL_IPV6:
2167         switch (optname) {
2168         case IPV6_MTU_DISCOVER:
2169         case IPV6_MTU:
2170         case IPV6_V6ONLY:
2171         case IPV6_RECVPKTINFO:
2172         case IPV6_UNICAST_HOPS:
2173         case IPV6_MULTICAST_HOPS:
2174         case IPV6_MULTICAST_LOOP:
2175         case IPV6_RECVERR:
2176         case IPV6_RECVHOPLIMIT:
2177         case IPV6_2292HOPLIMIT:
2178         case IPV6_CHECKSUM:
2179         case IPV6_ADDRFORM:
2180         case IPV6_2292PKTINFO:
2181         case IPV6_RECVTCLASS:
2182         case IPV6_RECVRTHDR:
2183         case IPV6_2292RTHDR:
2184         case IPV6_RECVHOPOPTS:
2185         case IPV6_2292HOPOPTS:
2186         case IPV6_RECVDSTOPTS:
2187         case IPV6_2292DSTOPTS:
2188         case IPV6_TCLASS:
2189         case IPV6_ADDR_PREFERENCES:
2190 #ifdef IPV6_RECVPATHMTU
2191         case IPV6_RECVPATHMTU:
2192 #endif
2193 #ifdef IPV6_TRANSPARENT
2194         case IPV6_TRANSPARENT:
2195 #endif
2196 #ifdef IPV6_FREEBIND
2197         case IPV6_FREEBIND:
2198 #endif
2199 #ifdef IPV6_RECVORIGDSTADDR
2200         case IPV6_RECVORIGDSTADDR:
2201 #endif
2202             val = 0;
2203             if (optlen < sizeof(uint32_t)) {
2204                 return -TARGET_EINVAL;
2205             }
2206             if (get_user_u32(val, optval_addr)) {
2207                 return -TARGET_EFAULT;
2208             }
2209             ret = get_errno(setsockopt(sockfd, level, optname,
2210                                        &val, sizeof(val)));
2211             break;
2212         case IPV6_PKTINFO:
2213         {
2214             struct in6_pktinfo pki;
2215 
2216             if (optlen < sizeof(pki)) {
2217                 return -TARGET_EINVAL;
2218             }
2219 
2220             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2221                 return -TARGET_EFAULT;
2222             }
2223 
2224             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2225 
2226             ret = get_errno(setsockopt(sockfd, level, optname,
2227                                        &pki, sizeof(pki)));
2228             break;
2229         }
2230         case IPV6_ADD_MEMBERSHIP:
2231         case IPV6_DROP_MEMBERSHIP:
2232         {
2233             struct ipv6_mreq ipv6mreq;
2234 
2235             if (optlen < sizeof(ipv6mreq)) {
2236                 return -TARGET_EINVAL;
2237             }
2238 
2239             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2240                 return -TARGET_EFAULT;
2241             }
2242 
2243             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2244 
2245             ret = get_errno(setsockopt(sockfd, level, optname,
2246                                        &ipv6mreq, sizeof(ipv6mreq)));
2247             break;
2248         }
2249         default:
2250             goto unimplemented;
2251         }
2252         break;
2253     case SOL_ICMPV6:
2254         switch (optname) {
2255         case ICMPV6_FILTER:
2256         {
2257             struct icmp6_filter icmp6f;
2258 
2259             if (optlen > sizeof(icmp6f)) {
2260                 optlen = sizeof(icmp6f);
2261             }
2262 
2263             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2264                 return -TARGET_EFAULT;
2265             }
2266 
2267             for (val = 0; val < 8; val++) {
2268                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2269             }
2270 
2271             ret = get_errno(setsockopt(sockfd, level, optname,
2272                                        &icmp6f, optlen));
2273             break;
2274         }
2275         default:
2276             goto unimplemented;
2277         }
2278         break;
2279     case SOL_RAW:
2280         switch (optname) {
2281         case ICMP_FILTER:
2282         case IPV6_CHECKSUM:
2283             /* these take a u32 value */
2284             if (optlen < sizeof(uint32_t)) {
2285                 return -TARGET_EINVAL;
2286             }
2287 
2288             if (get_user_u32(val, optval_addr)) {
2289                 return -TARGET_EFAULT;
2290             }
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        &val, sizeof(val)));
2293             break;
2294 
2295         default:
2296             goto unimplemented;
2297         }
2298         break;
2299 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2300     case SOL_ALG:
2301         switch (optname) {
2302         case ALG_SET_KEY:
2303         {
2304             char *alg_key = g_malloc(optlen);
2305 
2306             if (!alg_key) {
2307                 return -TARGET_ENOMEM;
2308             }
2309             if (copy_from_user(alg_key, optval_addr, optlen)) {
2310                 g_free(alg_key);
2311                 return -TARGET_EFAULT;
2312             }
2313             ret = get_errno(setsockopt(sockfd, level, optname,
2314                                        alg_key, optlen));
2315             g_free(alg_key);
2316             break;
2317         }
2318         case ALG_SET_AEAD_AUTHSIZE:
2319         {
2320             ret = get_errno(setsockopt(sockfd, level, optname,
2321                                        NULL, optlen));
2322             break;
2323         }
2324         default:
2325             goto unimplemented;
2326         }
2327         break;
2328 #endif
2329     case TARGET_SOL_SOCKET:
2330         switch (optname) {
2331         case TARGET_SO_RCVTIMEO:
2332         {
2333                 struct timeval tv;
2334 
2335                 optname = SO_RCVTIMEO;
2336 
2337 set_timeout:
2338                 if (optlen != sizeof(struct target_timeval)) {
2339                     return -TARGET_EINVAL;
2340                 }
2341 
2342                 if (copy_from_user_timeval(&tv, optval_addr)) {
2343                     return -TARGET_EFAULT;
2344                 }
2345 
2346                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2347                                 &tv, sizeof(tv)));
2348                 return ret;
2349         }
2350         case TARGET_SO_SNDTIMEO:
2351                 optname = SO_SNDTIMEO;
2352                 goto set_timeout;
2353         case TARGET_SO_ATTACH_FILTER:
2354         {
2355                 struct target_sock_fprog *tfprog;
2356                 struct target_sock_filter *tfilter;
2357                 struct sock_fprog fprog;
2358                 struct sock_filter *filter;
2359                 int i;
2360 
2361                 if (optlen != sizeof(*tfprog)) {
2362                     return -TARGET_EINVAL;
2363                 }
2364                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2365                     return -TARGET_EFAULT;
2366                 }
2367                 if (!lock_user_struct(VERIFY_READ, tfilter,
2368                                       tswapal(tfprog->filter), 0)) {
2369                     unlock_user_struct(tfprog, optval_addr, 1);
2370                     return -TARGET_EFAULT;
2371                 }
2372 
2373                 fprog.len = tswap16(tfprog->len);
2374                 filter = g_try_new(struct sock_filter, fprog.len);
2375                 if (filter == NULL) {
2376                     unlock_user_struct(tfilter, tfprog->filter, 1);
2377                     unlock_user_struct(tfprog, optval_addr, 1);
2378                     return -TARGET_ENOMEM;
2379                 }
2380                 for (i = 0; i < fprog.len; i++) {
2381                     filter[i].code = tswap16(tfilter[i].code);
2382                     filter[i].jt = tfilter[i].jt;
2383                     filter[i].jf = tfilter[i].jf;
2384                     filter[i].k = tswap32(tfilter[i].k);
2385                 }
2386                 fprog.filter = filter;
2387 
2388                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2389                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2390                 g_free(filter);
2391 
2392                 unlock_user_struct(tfilter, tfprog->filter, 1);
2393                 unlock_user_struct(tfprog, optval_addr, 1);
2394                 return ret;
2395         }
2396         case TARGET_SO_BINDTODEVICE:
2397         {
2398                 char *dev_ifname, *addr_ifname;
2399 
2400                 if (optlen > IFNAMSIZ - 1) {
2401                     optlen = IFNAMSIZ - 1;
2402                 }
2403                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2404                 if (!dev_ifname) {
2405                     return -TARGET_EFAULT;
2406                 }
2407                 optname = SO_BINDTODEVICE;
2408                 addr_ifname = alloca(IFNAMSIZ);
2409                 memcpy(addr_ifname, dev_ifname, optlen);
2410                 addr_ifname[optlen] = 0;
2411                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2412                                            addr_ifname, optlen));
2413                 unlock_user(dev_ifname, optval_addr, 0);
2414                 return ret;
2415         }
2416         case TARGET_SO_LINGER:
2417         {
2418                 struct linger lg;
2419                 struct target_linger *tlg;
2420 
2421                 if (optlen != sizeof(struct target_linger)) {
2422                     return -TARGET_EINVAL;
2423                 }
2424                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2425                     return -TARGET_EFAULT;
2426                 }
2427                 __get_user(lg.l_onoff, &tlg->l_onoff);
2428                 __get_user(lg.l_linger, &tlg->l_linger);
2429                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2430                                 &lg, sizeof(lg)));
2431                 unlock_user_struct(tlg, optval_addr, 0);
2432                 return ret;
2433         }
2434             /* Options with 'int' argument.  */
2435         case TARGET_SO_DEBUG:
2436                 optname = SO_DEBUG;
2437                 break;
2438         case TARGET_SO_REUSEADDR:
2439                 optname = SO_REUSEADDR;
2440                 break;
2441 #ifdef SO_REUSEPORT
2442         case TARGET_SO_REUSEPORT:
2443                 optname = SO_REUSEPORT;
2444                 break;
2445 #endif
2446         case TARGET_SO_TYPE:
2447                 optname = SO_TYPE;
2448                 break;
2449         case TARGET_SO_ERROR:
2450                 optname = SO_ERROR;
2451                 break;
2452         case TARGET_SO_DONTROUTE:
2453                 optname = SO_DONTROUTE;
2454                 break;
2455         case TARGET_SO_BROADCAST:
2456                 optname = SO_BROADCAST;
2457                 break;
2458         case TARGET_SO_SNDBUF:
2459                 optname = SO_SNDBUF;
2460                 break;
2461         case TARGET_SO_SNDBUFFORCE:
2462                 optname = SO_SNDBUFFORCE;
2463                 break;
2464         case TARGET_SO_RCVBUF:
2465                 optname = SO_RCVBUF;
2466                 break;
2467         case TARGET_SO_RCVBUFFORCE:
2468                 optname = SO_RCVBUFFORCE;
2469                 break;
2470         case TARGET_SO_KEEPALIVE:
2471                 optname = SO_KEEPALIVE;
2472                 break;
2473         case TARGET_SO_OOBINLINE:
2474                 optname = SO_OOBINLINE;
2475                 break;
2476         case TARGET_SO_NO_CHECK:
2477                 optname = SO_NO_CHECK;
2478                 break;
2479         case TARGET_SO_PRIORITY:
2480                 optname = SO_PRIORITY;
2481                 break;
2482 #ifdef SO_BSDCOMPAT
2483         case TARGET_SO_BSDCOMPAT:
2484                 optname = SO_BSDCOMPAT;
2485                 break;
2486 #endif
2487         case TARGET_SO_PASSCRED:
2488                 optname = SO_PASSCRED;
2489                 break;
2490         case TARGET_SO_PASSSEC:
2491                 optname = SO_PASSSEC;
2492                 break;
2493         case TARGET_SO_TIMESTAMP:
2494                 optname = SO_TIMESTAMP;
2495                 break;
2496         case TARGET_SO_RCVLOWAT:
2497                 optname = SO_RCVLOWAT;
2498                 break;
2499         default:
2500             goto unimplemented;
2501         }
2502         if (optlen < sizeof(uint32_t))
2503             return -TARGET_EINVAL;
2504 
2505         if (get_user_u32(val, optval_addr))
2506             return -TARGET_EFAULT;
2507         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2508         break;
2509 #ifdef SOL_NETLINK
2510     case SOL_NETLINK:
2511         switch (optname) {
2512         case NETLINK_PKTINFO:
2513         case NETLINK_ADD_MEMBERSHIP:
2514         case NETLINK_DROP_MEMBERSHIP:
2515         case NETLINK_BROADCAST_ERROR:
2516         case NETLINK_NO_ENOBUFS:
2517 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2518         case NETLINK_LISTEN_ALL_NSID:
2519         case NETLINK_CAP_ACK:
2520 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2521 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2522         case NETLINK_EXT_ACK:
2523 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2524 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2525         case NETLINK_GET_STRICT_CHK:
2526 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2527             break;
2528         default:
2529             goto unimplemented;
2530         }
2531         val = 0;
2532         if (optlen < sizeof(uint32_t)) {
2533             return -TARGET_EINVAL;
2534         }
2535         if (get_user_u32(val, optval_addr)) {
2536             return -TARGET_EFAULT;
2537         }
2538         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2539                                    sizeof(val)));
2540         break;
2541 #endif /* SOL_NETLINK */
2542     default:
2543     unimplemented:
2544         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2545                       level, optname);
2546         ret = -TARGET_ENOPROTOOPT;
2547     }
2548     return ret;
2549 }
2550 
2551 /* do_getsockopt() Must return target values and target errnos. */
2552 static abi_long do_getsockopt(int sockfd, int level, int optname,
2553                               abi_ulong optval_addr, abi_ulong optlen)
2554 {
2555     abi_long ret;
2556     int len, val;
2557     socklen_t lv;
2558 
2559     switch(level) {
2560     case TARGET_SOL_SOCKET:
2561         level = SOL_SOCKET;
2562         switch (optname) {
2563         /* These don't just return a single integer */
2564         case TARGET_SO_PEERNAME:
2565             goto unimplemented;
2566         case TARGET_SO_RCVTIMEO: {
2567             struct timeval tv;
2568             socklen_t tvlen;
2569 
2570             optname = SO_RCVTIMEO;
2571 
2572 get_timeout:
2573             if (get_user_u32(len, optlen)) {
2574                 return -TARGET_EFAULT;
2575             }
2576             if (len < 0) {
2577                 return -TARGET_EINVAL;
2578             }
2579 
2580             tvlen = sizeof(tv);
2581             ret = get_errno(getsockopt(sockfd, level, optname,
2582                                        &tv, &tvlen));
2583             if (ret < 0) {
2584                 return ret;
2585             }
2586             if (len > sizeof(struct target_timeval)) {
2587                 len = sizeof(struct target_timeval);
2588             }
2589             if (copy_to_user_timeval(optval_addr, &tv)) {
2590                 return -TARGET_EFAULT;
2591             }
2592             if (put_user_u32(len, optlen)) {
2593                 return -TARGET_EFAULT;
2594             }
2595             break;
2596         }
2597         case TARGET_SO_SNDTIMEO:
2598             optname = SO_SNDTIMEO;
2599             goto get_timeout;
2600         case TARGET_SO_PEERCRED: {
2601             struct ucred cr;
2602             socklen_t crlen;
2603             struct target_ucred *tcr;
2604 
2605             if (get_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             if (len < 0) {
2609                 return -TARGET_EINVAL;
2610             }
2611 
2612             crlen = sizeof(cr);
2613             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2614                                        &cr, &crlen));
2615             if (ret < 0) {
2616                 return ret;
2617             }
2618             if (len > crlen) {
2619                 len = crlen;
2620             }
2621             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2622                 return -TARGET_EFAULT;
2623             }
2624             __put_user(cr.pid, &tcr->pid);
2625             __put_user(cr.uid, &tcr->uid);
2626             __put_user(cr.gid, &tcr->gid);
2627             unlock_user_struct(tcr, optval_addr, 1);
2628             if (put_user_u32(len, optlen)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             break;
2632         }
2633         case TARGET_SO_PEERSEC: {
2634             char *name;
2635 
2636             if (get_user_u32(len, optlen)) {
2637                 return -TARGET_EFAULT;
2638             }
2639             if (len < 0) {
2640                 return -TARGET_EINVAL;
2641             }
2642             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2643             if (!name) {
2644                 return -TARGET_EFAULT;
2645             }
2646             lv = len;
2647             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2648                                        name, &lv));
2649             if (put_user_u32(lv, optlen)) {
2650                 ret = -TARGET_EFAULT;
2651             }
2652             unlock_user(name, optval_addr, lv);
2653             break;
2654         }
2655         case TARGET_SO_LINGER:
2656         {
2657             struct linger lg;
2658             socklen_t lglen;
2659             struct target_linger *tlg;
2660 
2661             if (get_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             if (len < 0) {
2665                 return -TARGET_EINVAL;
2666             }
2667 
2668             lglen = sizeof(lg);
2669             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2670                                        &lg, &lglen));
2671             if (ret < 0) {
2672                 return ret;
2673             }
2674             if (len > lglen) {
2675                 len = lglen;
2676             }
2677             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2678                 return -TARGET_EFAULT;
2679             }
2680             __put_user(lg.l_onoff, &tlg->l_onoff);
2681             __put_user(lg.l_linger, &tlg->l_linger);
2682             unlock_user_struct(tlg, optval_addr, 1);
2683             if (put_user_u32(len, optlen)) {
2684                 return -TARGET_EFAULT;
2685             }
2686             break;
2687         }
2688         /* Options with 'int' argument.  */
2689         case TARGET_SO_DEBUG:
2690             optname = SO_DEBUG;
2691             goto int_case;
2692         case TARGET_SO_REUSEADDR:
2693             optname = SO_REUSEADDR;
2694             goto int_case;
2695 #ifdef SO_REUSEPORT
2696         case TARGET_SO_REUSEPORT:
2697             optname = SO_REUSEPORT;
2698             goto int_case;
2699 #endif
2700         case TARGET_SO_TYPE:
2701             optname = SO_TYPE;
2702             goto int_case;
2703         case TARGET_SO_ERROR:
2704             optname = SO_ERROR;
2705             goto int_case;
2706         case TARGET_SO_DONTROUTE:
2707             optname = SO_DONTROUTE;
2708             goto int_case;
2709         case TARGET_SO_BROADCAST:
2710             optname = SO_BROADCAST;
2711             goto int_case;
2712         case TARGET_SO_SNDBUF:
2713             optname = SO_SNDBUF;
2714             goto int_case;
2715         case TARGET_SO_RCVBUF:
2716             optname = SO_RCVBUF;
2717             goto int_case;
2718         case TARGET_SO_KEEPALIVE:
2719             optname = SO_KEEPALIVE;
2720             goto int_case;
2721         case TARGET_SO_OOBINLINE:
2722             optname = SO_OOBINLINE;
2723             goto int_case;
2724         case TARGET_SO_NO_CHECK:
2725             optname = SO_NO_CHECK;
2726             goto int_case;
2727         case TARGET_SO_PRIORITY:
2728             optname = SO_PRIORITY;
2729             goto int_case;
2730 #ifdef SO_BSDCOMPAT
2731         case TARGET_SO_BSDCOMPAT:
2732             optname = SO_BSDCOMPAT;
2733             goto int_case;
2734 #endif
2735         case TARGET_SO_PASSCRED:
2736             optname = SO_PASSCRED;
2737             goto int_case;
2738         case TARGET_SO_TIMESTAMP:
2739             optname = SO_TIMESTAMP;
2740             goto int_case;
2741         case TARGET_SO_RCVLOWAT:
2742             optname = SO_RCVLOWAT;
2743             goto int_case;
2744         case TARGET_SO_ACCEPTCONN:
2745             optname = SO_ACCEPTCONN;
2746             goto int_case;
2747         case TARGET_SO_PROTOCOL:
2748             optname = SO_PROTOCOL;
2749             goto int_case;
2750         case TARGET_SO_DOMAIN:
2751             optname = SO_DOMAIN;
2752             goto int_case;
2753         default:
2754             goto int_case;
2755         }
2756         break;
2757     case SOL_TCP:
2758     case SOL_UDP:
2759         /* TCP and UDP options all take an 'int' value.  */
2760     int_case:
2761         if (get_user_u32(len, optlen))
2762             return -TARGET_EFAULT;
2763         if (len < 0)
2764             return -TARGET_EINVAL;
2765         lv = sizeof(lv);
2766         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2767         if (ret < 0)
2768             return ret;
2769         switch (optname) {
2770         case SO_TYPE:
2771             val = host_to_target_sock_type(val);
2772             break;
2773         case SO_ERROR:
2774             val = host_to_target_errno(val);
2775             break;
2776         }
2777         if (len > lv)
2778             len = lv;
2779         if (len == 4) {
2780             if (put_user_u32(val, optval_addr))
2781                 return -TARGET_EFAULT;
2782         } else {
2783             if (put_user_u8(val, optval_addr))
2784                 return -TARGET_EFAULT;
2785         }
2786         if (put_user_u32(len, optlen))
2787             return -TARGET_EFAULT;
2788         break;
2789     case SOL_IP:
2790         switch(optname) {
2791         case IP_TOS:
2792         case IP_TTL:
2793         case IP_HDRINCL:
2794         case IP_ROUTER_ALERT:
2795         case IP_RECVOPTS:
2796         case IP_RETOPTS:
2797         case IP_PKTINFO:
2798         case IP_MTU_DISCOVER:
2799         case IP_RECVERR:
2800         case IP_RECVTOS:
2801 #ifdef IP_FREEBIND
2802         case IP_FREEBIND:
2803 #endif
2804         case IP_MULTICAST_TTL:
2805         case IP_MULTICAST_LOOP:
2806             if (get_user_u32(len, optlen))
2807                 return -TARGET_EFAULT;
2808             if (len < 0)
2809                 return -TARGET_EINVAL;
2810             lv = sizeof(lv);
2811             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2812             if (ret < 0)
2813                 return ret;
2814             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2815                 len = 1;
2816                 if (put_user_u32(len, optlen)
2817                     || put_user_u8(val, optval_addr))
2818                     return -TARGET_EFAULT;
2819             } else {
2820                 if (len > sizeof(int))
2821                     len = sizeof(int);
2822                 if (put_user_u32(len, optlen)
2823                     || put_user_u32(val, optval_addr))
2824                     return -TARGET_EFAULT;
2825             }
2826             break;
2827         default:
2828             ret = -TARGET_ENOPROTOOPT;
2829             break;
2830         }
2831         break;
2832     case SOL_IPV6:
2833         switch (optname) {
2834         case IPV6_MTU_DISCOVER:
2835         case IPV6_MTU:
2836         case IPV6_V6ONLY:
2837         case IPV6_RECVPKTINFO:
2838         case IPV6_UNICAST_HOPS:
2839         case IPV6_MULTICAST_HOPS:
2840         case IPV6_MULTICAST_LOOP:
2841         case IPV6_RECVERR:
2842         case IPV6_RECVHOPLIMIT:
2843         case IPV6_2292HOPLIMIT:
2844         case IPV6_CHECKSUM:
2845         case IPV6_ADDRFORM:
2846         case IPV6_2292PKTINFO:
2847         case IPV6_RECVTCLASS:
2848         case IPV6_RECVRTHDR:
2849         case IPV6_2292RTHDR:
2850         case IPV6_RECVHOPOPTS:
2851         case IPV6_2292HOPOPTS:
2852         case IPV6_RECVDSTOPTS:
2853         case IPV6_2292DSTOPTS:
2854         case IPV6_TCLASS:
2855         case IPV6_ADDR_PREFERENCES:
2856 #ifdef IPV6_RECVPATHMTU
2857         case IPV6_RECVPATHMTU:
2858 #endif
2859 #ifdef IPV6_TRANSPARENT
2860         case IPV6_TRANSPARENT:
2861 #endif
2862 #ifdef IPV6_FREEBIND
2863         case IPV6_FREEBIND:
2864 #endif
2865 #ifdef IPV6_RECVORIGDSTADDR
2866         case IPV6_RECVORIGDSTADDR:
2867 #endif
2868             if (get_user_u32(len, optlen))
2869                 return -TARGET_EFAULT;
2870             if (len < 0)
2871                 return -TARGET_EINVAL;
2872             lv = sizeof(lv);
2873             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2874             if (ret < 0)
2875                 return ret;
2876             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2877                 len = 1;
2878                 if (put_user_u32(len, optlen)
2879                     || put_user_u8(val, optval_addr))
2880                     return -TARGET_EFAULT;
2881             } else {
2882                 if (len > sizeof(int))
2883                     len = sizeof(int);
2884                 if (put_user_u32(len, optlen)
2885                     || put_user_u32(val, optval_addr))
2886                     return -TARGET_EFAULT;
2887             }
2888             break;
2889         default:
2890             ret = -TARGET_ENOPROTOOPT;
2891             break;
2892         }
2893         break;
2894 #ifdef SOL_NETLINK
2895     case SOL_NETLINK:
2896         switch (optname) {
2897         case NETLINK_PKTINFO:
2898         case NETLINK_BROADCAST_ERROR:
2899         case NETLINK_NO_ENOBUFS:
2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2901         case NETLINK_LISTEN_ALL_NSID:
2902         case NETLINK_CAP_ACK:
2903 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2904 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2905         case NETLINK_EXT_ACK:
2906 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2907 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2908         case NETLINK_GET_STRICT_CHK:
2909 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2910             if (get_user_u32(len, optlen)) {
2911                 return -TARGET_EFAULT;
2912             }
2913             if (len != sizeof(val)) {
2914                 return -TARGET_EINVAL;
2915             }
2916             lv = len;
2917             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2918             if (ret < 0) {
2919                 return ret;
2920             }
2921             if (put_user_u32(lv, optlen)
2922                 || put_user_u32(val, optval_addr)) {
2923                 return -TARGET_EFAULT;
2924             }
2925             break;
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2927         case NETLINK_LIST_MEMBERSHIPS:
2928         {
2929             uint32_t *results;
2930             int i;
2931             if (get_user_u32(len, optlen)) {
2932                 return -TARGET_EFAULT;
2933             }
2934             if (len < 0) {
2935                 return -TARGET_EINVAL;
2936             }
2937             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2938             if (!results && len > 0) {
2939                 return -TARGET_EFAULT;
2940             }
2941             lv = len;
2942             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2943             if (ret < 0) {
2944                 unlock_user(results, optval_addr, 0);
2945                 return ret;
2946             }
2947             /* swap host endianness to target endianness. */
2948             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2949                 results[i] = tswap32(results[i]);
2950             }
2951             if (put_user_u32(lv, optlen)) {
2952                 return -TARGET_EFAULT;
2953             }
2954             unlock_user(results, optval_addr, 0);
2955             break;
2956         }
2957 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2958         default:
2959             goto unimplemented;
2960         }
2961         break;
2962 #endif /* SOL_NETLINK */
2963     default:
2964     unimplemented:
2965         qemu_log_mask(LOG_UNIMP,
2966                       "getsockopt level=%d optname=%d not yet supported\n",
2967                       level, optname);
2968         ret = -TARGET_EOPNOTSUPP;
2969         break;
2970     }
2971     return ret;
2972 }
2973 
2974 /* Convert target low/high pair representing file offset into the host
2975  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2976  * as the kernel doesn't handle them either.
2977  */
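/*
 * Illustrative example (editorial, not upstream text): a 32-bit target
 * passing tlow = 0x89abcdef and thigh = 0x01234567 yields
 * off = 0x0123456789abcdef; a 64-bit host then gets *hlow = off and
 * *hhigh = 0, while a 32-bit host gets *hlow = 0x89abcdef and
 * *hhigh = 0x01234567.
 */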
2978 static void target_to_host_low_high(abi_ulong tlow,
2979                                     abi_ulong thigh,
2980                                     unsigned long *hlow,
2981                                     unsigned long *hhigh)
2982 {
2983     uint64_t off = tlow |
2984         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2985         TARGET_LONG_BITS / 2;
2986 
2987     *hlow = off;
2988     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2989 }
2990 
2991 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2992                                 abi_ulong count, int copy)
2993 {
2994     struct target_iovec *target_vec;
2995     struct iovec *vec;
2996     abi_ulong total_len, max_len;
2997     int i;
2998     int err = 0;
2999     bool bad_address = false;
3000 
3001     if (count == 0) {
3002         errno = 0;
3003         return NULL;
3004     }
3005     if (count > IOV_MAX) {
3006         errno = EINVAL;
3007         return NULL;
3008     }
3009 
3010     vec = g_try_new0(struct iovec, count);
3011     if (vec == NULL) {
3012         errno = ENOMEM;
3013         return NULL;
3014     }
3015 
3016     target_vec = lock_user(VERIFY_READ, target_addr,
3017                            count * sizeof(struct target_iovec), 1);
3018     if (target_vec == NULL) {
3019         err = EFAULT;
3020         goto fail2;
3021     }
3022 
3023     /* ??? If host page size > target page size, this will result in a
3024        value larger than what we can actually support.  */
3025     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3026     total_len = 0;
3027 
3028     for (i = 0; i < count; i++) {
3029         abi_ulong base = tswapal(target_vec[i].iov_base);
3030         abi_long len = tswapal(target_vec[i].iov_len);
3031 
3032         if (len < 0) {
3033             err = EINVAL;
3034             goto fail;
3035         } else if (len == 0) {
3036             /* A zero-length entry is ignored.  */
3037             vec[i].iov_base = 0;
3038         } else {
3039             vec[i].iov_base = lock_user(type, base, len, copy);
3040             /* If the first buffer pointer is bad, this is a fault.  But
3041              * subsequent bad buffers will result in a partial write; this
3042              * is realized by filling the vector with null pointers and
3043              * zero lengths. */
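            /*
             * Illustrative example (editorial, not upstream text): for a
             * writev() of three buffers where only the second is unmapped,
             * buffer 0 keeps its length, buffer 1 becomes { NULL, 0 }, and
             * every later buffer is clamped to zero length, so the guest
             * sees a short write count rather than -EFAULT.
             */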
3044             if (!vec[i].iov_base) {
3045                 if (i == 0) {
3046                     err = EFAULT;
3047                     goto fail;
3048                 } else {
3049                     bad_address = true;
3050                 }
3051             }
3052             if (bad_address) {
3053                 len = 0;
3054             }
3055             if (len > max_len - total_len) {
3056                 len = max_len - total_len;
3057             }
3058         }
3059         vec[i].iov_len = len;
3060         total_len += len;
3061     }
3062 
3063     unlock_user(target_vec, target_addr, 0);
3064     return vec;
3065 
3066  fail:
3067     while (--i >= 0) {
3068         if (tswapal(target_vec[i].iov_len) > 0) {
3069             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3070         }
3071     }
3072     unlock_user(target_vec, target_addr, 0);
3073  fail2:
3074     g_free(vec);
3075     errno = err;
3076     return NULL;
3077 }
3078 
3079 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3080                          abi_ulong count, int copy)
3081 {
3082     struct target_iovec *target_vec;
3083     int i;
3084 
3085     target_vec = lock_user(VERIFY_READ, target_addr,
3086                            count * sizeof(struct target_iovec), 1);
3087     if (target_vec) {
3088         for (i = 0; i < count; i++) {
3089             abi_ulong base = tswapal(target_vec[i].iov_base);
3090             abi_long len = tswapal(target_vec[i].iov_len);
3091             if (len < 0) {
3092                 break;
3093             }
3094             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3095         }
3096         unlock_user(target_vec, target_addr, 0);
3097     }
3098 
3099     g_free(vec);
3100 }
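
/*
 * Usage sketch (added, illustrative only, placeholder names): lock_iovec()
 * and unlock_iovec() are meant to be paired, with the "copy" arguments
 * deciding whether guest data is copied in when locking (for writes/sends)
 * or copied back out when unlocking (for reads/receives).  Assuming a
 * safe_readv() wrapper like the other safe_* wrappers in this file, a
 * readv-style emulation would look roughly like this:
 */
#if 0
static abi_long example_readv(int fd, abi_ulong target_iov, abi_ulong count)
{
    abi_long ret;
    struct iovec *vec = lock_iovec(VERIFY_WRITE, target_iov, count, 0);

    if (vec == NULL) {
        return -host_to_target_errno(errno);
    }
    ret = get_errno(safe_readv(fd, vec, count));
    unlock_iovec(vec, target_iov, count, 1);
    return ret;
}
#endif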
3101 
3102 static inline int target_to_host_sock_type(int *type)
3103 {
3104     int host_type = 0;
3105     int target_type = *type;
3106 
3107     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3108     case TARGET_SOCK_DGRAM:
3109         host_type = SOCK_DGRAM;
3110         break;
3111     case TARGET_SOCK_STREAM:
3112         host_type = SOCK_STREAM;
3113         break;
3114     default:
3115         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3116         break;
3117     }
3118     if (target_type & TARGET_SOCK_CLOEXEC) {
3119 #if defined(SOCK_CLOEXEC)
3120         host_type |= SOCK_CLOEXEC;
3121 #else
3122         return -TARGET_EINVAL;
3123 #endif
3124     }
3125     if (target_type & TARGET_SOCK_NONBLOCK) {
3126 #if defined(SOCK_NONBLOCK)
3127         host_type |= SOCK_NONBLOCK;
3128 #elif !defined(O_NONBLOCK)
3129         return -TARGET_EINVAL;
3130 #endif
3131     }
3132     *type = host_type;
3133     return 0;
3134 }
3135 
3136 /* Try to emulate socket type flags after socket creation.  */
3137 static int sock_flags_fixup(int fd, int target_type)
3138 {
3139 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3140     if (target_type & TARGET_SOCK_NONBLOCK) {
3141         int flags = fcntl(fd, F_GETFL);
3142         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3143             close(fd);
3144             return -TARGET_EINVAL;
3145         }
3146     }
3147 #endif
3148     return fd;
3149 }
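
/*
 * Note (added for clarity): when the host lacks SOCK_NONBLOCK,
 * target_to_host_sock_type() leaves that flag to be emulated here with
 * fcntl(O_NONBLOCK) once the socket exists; there is no equivalent
 * fallback for SOCK_CLOEXEC, which is why a host without SOCK_CLOEXEC
 * makes target_to_host_sock_type() fail with -TARGET_EINVAL instead.
 */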
3150 
3151 /* do_socket() must return target values and target errnos. */
3152 static abi_long do_socket(int domain, int type, int protocol)
3153 {
3154     int target_type = type;
3155     int ret;
3156 
3157     ret = target_to_host_sock_type(&type);
3158     if (ret) {
3159         return ret;
3160     }
3161 
3162     if (domain == PF_NETLINK && !(
3163 #ifdef CONFIG_RTNETLINK
3164          protocol == NETLINK_ROUTE ||
3165 #endif
3166          protocol == NETLINK_KOBJECT_UEVENT ||
3167          protocol == NETLINK_AUDIT)) {
3168         return -TARGET_EPROTONOSUPPORT;
3169     }
3170 
3171     if (domain == AF_PACKET ||
3172         (domain == AF_INET && type == SOCK_PACKET)) {
3173         protocol = tswap16(protocol);
3174     }
3175 
3176     ret = get_errno(socket(domain, type, protocol));
3177     if (ret >= 0) {
3178         ret = sock_flags_fixup(ret, target_type);
3179         if (type == SOCK_PACKET) {
3180             /* Manage an obsolete case: if the socket type is
3181              * SOCK_PACKET, bind by name.
3182              */
3183             fd_trans_register(ret, &target_packet_trans);
3184         } else if (domain == PF_NETLINK) {
3185             switch (protocol) {
3186 #ifdef CONFIG_RTNETLINK
3187             case NETLINK_ROUTE:
3188                 fd_trans_register(ret, &target_netlink_route_trans);
3189                 break;
3190 #endif
3191             case NETLINK_KOBJECT_UEVENT:
3192                 /* nothing to do: messages are strings */
3193                 break;
3194             case NETLINK_AUDIT:
3195                 fd_trans_register(ret, &target_netlink_audit_trans);
3196                 break;
3197             default:
3198                 g_assert_not_reached();
3199             }
3200         }
3201     }
3202     return ret;
3203 }
3204 
3205 /* do_bind() must return target values and target errnos. */
3206 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3207                         socklen_t addrlen)
3208 {
3209     void *addr;
3210     abi_long ret;
3211 
3212     if ((int)addrlen < 0) {
3213         return -TARGET_EINVAL;
3214     }
3215 
3216     addr = alloca(addrlen+1);
3217 
3218     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3219     if (ret)
3220         return ret;
3221 
3222     return get_errno(bind(sockfd, addr, addrlen));
3223 }
3224 
3225 /* do_connect() must return target values and target errnos. */
3226 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3227                            socklen_t addrlen)
3228 {
3229     void *addr;
3230     abi_long ret;
3231 
3232     if ((int)addrlen < 0) {
3233         return -TARGET_EINVAL;
3234     }
3235 
3236     addr = alloca(addrlen+1);
3237 
3238     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3239     if (ret)
3240         return ret;
3241 
3242     return get_errno(safe_connect(sockfd, addr, addrlen));
3243 }
3244 
3245 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3246 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3247                                       int flags, int send)
3248 {
3249     abi_long ret, len;
3250     struct msghdr msg;
3251     abi_ulong count;
3252     struct iovec *vec;
3253     abi_ulong target_vec;
3254 
3255     if (msgp->msg_name) {
3256         msg.msg_namelen = tswap32(msgp->msg_namelen);
3257         msg.msg_name = alloca(msg.msg_namelen+1);
3258         ret = target_to_host_sockaddr(fd, msg.msg_name,
3259                                       tswapal(msgp->msg_name),
3260                                       msg.msg_namelen);
3261         if (ret == -TARGET_EFAULT) {
3262             /* For connected sockets msg_name and msg_namelen must
3263              * be ignored, so returning EFAULT immediately is wrong.
3264              * Instead, pass a bad msg_name to the host kernel, and
3265              * let it decide whether to return EFAULT or not.
3266              */
3267             msg.msg_name = (void *)-1;
3268         } else if (ret) {
3269             goto out2;
3270         }
3271     } else {
3272         msg.msg_name = NULL;
3273         msg.msg_namelen = 0;
3274     }
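    /*
     * Note (added for clarity): the host control buffer is sized at twice
     * the target's msg_controllen, presumably so that host cmsg headers,
     * which may be larger or more strictly aligned than the target's,
     * still fit when target_to_host_cmsg()/host_to_target_cmsg() rebuild
     * the messages.
     */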
3275     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3276     msg.msg_control = alloca(msg.msg_controllen);
3277     memset(msg.msg_control, 0, msg.msg_controllen);
3278 
3279     msg.msg_flags = tswap32(msgp->msg_flags);
3280 
3281     count = tswapal(msgp->msg_iovlen);
3282     target_vec = tswapal(msgp->msg_iov);
3283 
3284     if (count > IOV_MAX) {
3285         /* sendmsg/recvmsg return a different errno for this condition than
3286          * readv/writev, so we must catch it here before lock_iovec() does.
3287          */
3288         ret = -TARGET_EMSGSIZE;
3289         goto out2;
3290     }
3291 
3292     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3293                      target_vec, count, send);
3294     if (vec == NULL) {
3295         ret = -host_to_target_errno(errno);
3296         goto out2;
3297     }
3298     msg.msg_iovlen = count;
3299     msg.msg_iov = vec;
3300 
3301     if (send) {
3302         if (fd_trans_target_to_host_data(fd)) {
3303             void *host_msg;
3304 
3305             host_msg = g_malloc(msg.msg_iov->iov_len);
3306             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3307             ret = fd_trans_target_to_host_data(fd)(host_msg,
3308                                                    msg.msg_iov->iov_len);
3309             if (ret >= 0) {
3310                 msg.msg_iov->iov_base = host_msg;
3311                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3312             }
3313             g_free(host_msg);
3314         } else {
3315             ret = target_to_host_cmsg(&msg, msgp);
3316             if (ret == 0) {
3317                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3318             }
3319         }
3320     } else {
3321         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3322         if (!is_error(ret)) {
3323             len = ret;
3324             if (fd_trans_host_to_target_data(fd)) {
3325                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3326                                                MIN(msg.msg_iov->iov_len, len));
3327             }
3328             if (!is_error(ret)) {
3329                 ret = host_to_target_cmsg(msgp, &msg);
3330             }
3331             if (!is_error(ret)) {
3332                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3333                 msgp->msg_flags = tswap32(msg.msg_flags);
3334                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3335                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3336                                     msg.msg_name, msg.msg_namelen);
3337                     if (ret) {
3338                         goto out;
3339                     }
3340                 }
3341 
3342                 ret = len;
3343             }
3344         }
3345     }
3346 
3347 out:
3348     unlock_iovec(vec, target_vec, count, !send);
3349 out2:
3350     return ret;
3351 }
3352 
3353 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3354                                int flags, int send)
3355 {
3356     abi_long ret;
3357     struct target_msghdr *msgp;
3358 
3359     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3360                           msgp,
3361                           target_msg,
3362                           send ? 1 : 0)) {
3363         return -TARGET_EFAULT;
3364     }
3365     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3366     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3367     return ret;
3368 }
3369 
3370 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3371  * so it might not have this *mmsg-specific flag either.
3372  */
3373 #ifndef MSG_WAITFORONE
3374 #define MSG_WAITFORONE 0x10000
3375 #endif
3376 
3377 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3378                                 unsigned int vlen, unsigned int flags,
3379                                 int send)
3380 {
3381     struct target_mmsghdr *mmsgp;
3382     abi_long ret = 0;
3383     int i;
3384 
3385     if (vlen > UIO_MAXIOV) {
3386         vlen = UIO_MAXIOV;
3387     }
3388 
3389     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3390     if (!mmsgp) {
3391         return -TARGET_EFAULT;
3392     }
3393 
3394     for (i = 0; i < vlen; i++) {
3395         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3396         if (is_error(ret)) {
3397             break;
3398         }
3399         mmsgp[i].msg_len = tswap32(ret);
3400         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3401         if (flags & MSG_WAITFORONE) {
3402             flags |= MSG_DONTWAIT;
3403         }
3404     }
3405 
3406     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3407 
3408     /* Return the number of datagrams sent or received if we handled
3409      * any at all; otherwise return the error.
3410      */
3411     if (i) {
3412         return i;
3413     }
3414     return ret;
3415 }
3416 
3417 /* do_accept4() must return target values and target errnos. */
3418 static abi_long do_accept4(int fd, abi_ulong target_addr,
3419                            abi_ulong target_addrlen_addr, int flags)
3420 {
3421     socklen_t addrlen, ret_addrlen;
3422     void *addr;
3423     abi_long ret;
3424     int host_flags;
3425 
3426     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3427 
3428     if (target_addr == 0) {
3429         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3430     }
3431 
3432     /* linux returns EFAULT if addrlen pointer is invalid */
3433     if (get_user_u32(addrlen, target_addrlen_addr))
3434         return -TARGET_EFAULT;
3435 
3436     if ((int)addrlen < 0) {
3437         return -TARGET_EINVAL;
3438     }
3439 
3440     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3441         return -TARGET_EFAULT;
3442     }
3443 
3444     addr = alloca(addrlen);
3445 
3446     ret_addrlen = addrlen;
3447     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3448     if (!is_error(ret)) {
3449         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3450         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3451             ret = -TARGET_EFAULT;
3452         }
3453     }
3454     return ret;
3455 }
3456 
3457 /* do_getpeername() must return target values and target errnos. */
3458 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3459                                abi_ulong target_addrlen_addr)
3460 {
3461     socklen_t addrlen, ret_addrlen;
3462     void *addr;
3463     abi_long ret;
3464 
3465     if (get_user_u32(addrlen, target_addrlen_addr))
3466         return -TARGET_EFAULT;
3467 
3468     if ((int)addrlen < 0) {
3469         return -TARGET_EINVAL;
3470     }
3471 
3472     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3473         return -TARGET_EFAULT;
3474     }
3475 
3476     addr = alloca(addrlen);
3477 
3478     ret_addrlen = addrlen;
3479     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3480     if (!is_error(ret)) {
3481         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3482         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3483             ret = -TARGET_EFAULT;
3484         }
3485     }
3486     return ret;
3487 }
3488 
3489 /* do_getsockname() must return target values and target errnos. */
3490 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3491                                abi_ulong target_addrlen_addr)
3492 {
3493     socklen_t addrlen, ret_addrlen;
3494     void *addr;
3495     abi_long ret;
3496 
3497     if (get_user_u32(addrlen, target_addrlen_addr))
3498         return -TARGET_EFAULT;
3499 
3500     if ((int)addrlen < 0) {
3501         return -TARGET_EINVAL;
3502     }
3503 
3504     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3505         return -TARGET_EFAULT;
3506     }
3507 
3508     addr = alloca(addrlen);
3509 
3510     ret_addrlen = addrlen;
3511     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3512     if (!is_error(ret)) {
3513         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3514         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3515             ret = -TARGET_EFAULT;
3516         }
3517     }
3518     return ret;
3519 }
3520 
3521 /* do_socketpair() must return target values and target errnos. */
3522 static abi_long do_socketpair(int domain, int type, int protocol,
3523                               abi_ulong target_tab_addr)
3524 {
3525     int tab[2];
3526     abi_long ret;
3527 
3528     target_to_host_sock_type(&type);
3529 
3530     ret = get_errno(socketpair(domain, type, protocol, tab));
3531     if (!is_error(ret)) {
3532         if (put_user_s32(tab[0], target_tab_addr)
3533             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3534             ret = -TARGET_EFAULT;
3535     }
3536     return ret;
3537 }
3538 
3539 /* do_sendto() must return target values and target errnos. */
3540 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3541                           abi_ulong target_addr, socklen_t addrlen)
3542 {
3543     void *addr;
3544     void *host_msg;
3545     void *copy_msg = NULL;
3546     abi_long ret;
3547 
3548     if ((int)addrlen < 0) {
3549         return -TARGET_EINVAL;
3550     }
3551 
3552     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3553     if (!host_msg)
3554         return -TARGET_EFAULT;
3555     if (fd_trans_target_to_host_data(fd)) {
3556         copy_msg = host_msg;
3557         host_msg = g_malloc(len);
3558         memcpy(host_msg, copy_msg, len);
3559         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3560         if (ret < 0) {
3561             goto fail;
3562         }
3563     }
3564     if (target_addr) {
3565         addr = alloca(addrlen+1);
3566         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3567         if (ret) {
3568             goto fail;
3569         }
3570         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3571     } else {
3572         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3573     }
3574 fail:
3575     if (copy_msg) {
3576         g_free(host_msg);
3577         host_msg = copy_msg;
3578     }
3579     unlock_user(host_msg, msg, 0);
3580     return ret;
3581 }
3582 
3583 /* do_recvfrom() must return target values and target errnos. */
3584 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3585                             abi_ulong target_addr,
3586                             abi_ulong target_addrlen)
3587 {
3588     socklen_t addrlen, ret_addrlen;
3589     void *addr;
3590     void *host_msg;
3591     abi_long ret;
3592 
3593     if (!msg) {
3594         host_msg = NULL;
3595     } else {
3596         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3597         if (!host_msg) {
3598             return -TARGET_EFAULT;
3599         }
3600     }
3601     if (target_addr) {
3602         if (get_user_u32(addrlen, target_addrlen)) {
3603             ret = -TARGET_EFAULT;
3604             goto fail;
3605         }
3606         if ((int)addrlen < 0) {
3607             ret = -TARGET_EINVAL;
3608             goto fail;
3609         }
3610         addr = alloca(addrlen);
3611         ret_addrlen = addrlen;
3612         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3613                                       addr, &ret_addrlen));
3614     } else {
3615         addr = NULL; /* To keep compiler quiet.  */
3616         addrlen = 0; /* To keep compiler quiet.  */
3617         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3618     }
3619     if (!is_error(ret)) {
3620         if (fd_trans_host_to_target_data(fd)) {
3621             abi_long trans;
3622             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3623             if (is_error(trans)) {
3624                 ret = trans;
3625                 goto fail;
3626             }
3627         }
3628         if (target_addr) {
3629             host_to_target_sockaddr(target_addr, addr,
3630                                     MIN(addrlen, ret_addrlen));
3631             if (put_user_u32(ret_addrlen, target_addrlen)) {
3632                 ret = -TARGET_EFAULT;
3633                 goto fail;
3634             }
3635         }
3636         unlock_user(host_msg, msg, len);
3637     } else {
3638 fail:
3639         unlock_user(host_msg, msg, 0);
3640     }
3641     return ret;
3642 }
3643 
3644 #ifdef TARGET_NR_socketcall
3645 /* do_socketcall() must return target values and target errnos. */
3646 static abi_long do_socketcall(int num, abi_ulong vptr)
3647 {
3648     static const unsigned nargs[] = { /* number of arguments per operation */
3649         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3650         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3651         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3652         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3653         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3654         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3655         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3656         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3657         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3658         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3659         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3660         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3661         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3662         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3663         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3664         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3665         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3666         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3667         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3668         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3669     };
3670     abi_long a[6]; /* max 6 args */
3671     unsigned i;
3672 
3673     /* check the range of the first argument num */
3674     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3675     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3676         return -TARGET_EINVAL;
3677     }
3678     /* ensure we have space for args */
3679     if (nargs[num] > ARRAY_SIZE(a)) {
3680         return -TARGET_EINVAL;
3681     }
3682     /* collect the arguments in a[] according to nargs[] */
3683     for (i = 0; i < nargs[num]; ++i) {
3684         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3685             return -TARGET_EFAULT;
3686         }
3687     }
3688     /* now that we have the args, invoke the appropriate underlying function */
3689     switch (num) {
3690     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3691         return do_socket(a[0], a[1], a[2]);
3692     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3693         return do_bind(a[0], a[1], a[2]);
3694     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3695         return do_connect(a[0], a[1], a[2]);
3696     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3697         return get_errno(listen(a[0], a[1]));
3698     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3699         return do_accept4(a[0], a[1], a[2], 0);
3700     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3701         return do_getsockname(a[0], a[1], a[2]);
3702     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3703         return do_getpeername(a[0], a[1], a[2]);
3704     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3705         return do_socketpair(a[0], a[1], a[2], a[3]);
3706     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3707         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3708     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3709         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3710     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3711         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3712     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3713         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3714     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3715         return get_errno(shutdown(a[0], a[1]));
3716     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3717         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3718     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3719         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3720     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3721         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3722     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3723         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3724     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3725         return do_accept4(a[0], a[1], a[2], a[3]);
3726     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3727         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3728     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3729         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3730     default:
3731         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3732         return -TARGET_EINVAL;
3733     }
3734 }
3735 #endif
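
/*
 * Guest-side sketch (added, hypothetical): a guest using socketcall passes
 * the operation number plus a pointer to an array of abi_long arguments,
 * which the loop above reads back one slot at a time with get_user_ual().
 * Roughly, for a 32-bit guest whose libc routes connect() through
 * socketcall, where SYS_CONNECT is the guest's <linux/net.h> constant
 * corresponding to TARGET_SYS_CONNECT here:
 */
#if 0
static int example_guest_connect(int sockfd, struct sockaddr_in *addr)
{
    long args[3] = { sockfd, (long)addr, sizeof(*addr) };

    return syscall(SYS_socketcall, SYS_CONNECT, args);
}
#endif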
3736 
3737 #define N_SHM_REGIONS	32
3738 
3739 static struct shm_region {
3740     abi_ulong start;
3741     abi_ulong size;
3742     bool in_use;
3743 } shm_regions[N_SHM_REGIONS];
3744 
3745 #ifndef TARGET_SEMID64_DS
3746 /* asm-generic version of this struct */
3747 struct target_semid64_ds
3748 {
3749   struct target_ipc_perm sem_perm;
3750   abi_ulong sem_otime;
3751 #if TARGET_ABI_BITS == 32
3752   abi_ulong __unused1;
3753 #endif
3754   abi_ulong sem_ctime;
3755 #if TARGET_ABI_BITS == 32
3756   abi_ulong __unused2;
3757 #endif
3758   abi_ulong sem_nsems;
3759   abi_ulong __unused3;
3760   abi_ulong __unused4;
3761 };
3762 #endif
3763 
3764 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3765                                                abi_ulong target_addr)
3766 {
3767     struct target_ipc_perm *target_ip;
3768     struct target_semid64_ds *target_sd;
3769 
3770     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3771         return -TARGET_EFAULT;
3772     target_ip = &(target_sd->sem_perm);
3773     host_ip->__key = tswap32(target_ip->__key);
3774     host_ip->uid = tswap32(target_ip->uid);
3775     host_ip->gid = tswap32(target_ip->gid);
3776     host_ip->cuid = tswap32(target_ip->cuid);
3777     host_ip->cgid = tswap32(target_ip->cgid);
3778 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3779     host_ip->mode = tswap32(target_ip->mode);
3780 #else
3781     host_ip->mode = tswap16(target_ip->mode);
3782 #endif
3783 #if defined(TARGET_PPC)
3784     host_ip->__seq = tswap32(target_ip->__seq);
3785 #else
3786     host_ip->__seq = tswap16(target_ip->__seq);
3787 #endif
3788     unlock_user_struct(target_sd, target_addr, 0);
3789     return 0;
3790 }
3791 
3792 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3793                                                struct ipc_perm *host_ip)
3794 {
3795     struct target_ipc_perm *target_ip;
3796     struct target_semid64_ds *target_sd;
3797 
3798     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3799         return -TARGET_EFAULT;
3800     target_ip = &(target_sd->sem_perm);
3801     target_ip->__key = tswap32(host_ip->__key);
3802     target_ip->uid = tswap32(host_ip->uid);
3803     target_ip->gid = tswap32(host_ip->gid);
3804     target_ip->cuid = tswap32(host_ip->cuid);
3805     target_ip->cgid = tswap32(host_ip->cgid);
3806 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3807     target_ip->mode = tswap32(host_ip->mode);
3808 #else
3809     target_ip->mode = tswap16(host_ip->mode);
3810 #endif
3811 #if defined(TARGET_PPC)
3812     target_ip->__seq = tswap32(host_ip->__seq);
3813 #else
3814     target_ip->__seq = tswap16(host_ip->__seq);
3815 #endif
3816     unlock_user_struct(target_sd, target_addr, 1);
3817     return 0;
3818 }
3819 
3820 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3821                                                abi_ulong target_addr)
3822 {
3823     struct target_semid64_ds *target_sd;
3824 
3825     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3826         return -TARGET_EFAULT;
3827     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3828         return -TARGET_EFAULT;
3829     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3830     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3831     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3832     unlock_user_struct(target_sd, target_addr, 0);
3833     return 0;
3834 }
3835 
3836 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3837                                                struct semid_ds *host_sd)
3838 {
3839     struct target_semid64_ds *target_sd;
3840 
3841     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3842         return -TARGET_EFAULT;
3843     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3844         return -TARGET_EFAULT;
3845     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3846     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3847     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3848     unlock_user_struct(target_sd, target_addr, 1);
3849     return 0;
3850 }
3851 
3852 struct target_seminfo {
3853     int semmap;
3854     int semmni;
3855     int semmns;
3856     int semmnu;
3857     int semmsl;
3858     int semopm;
3859     int semume;
3860     int semusz;
3861     int semvmx;
3862     int semaem;
3863 };
3864 
3865 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3866                                               struct seminfo *host_seminfo)
3867 {
3868     struct target_seminfo *target_seminfo;
3869     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3870         return -TARGET_EFAULT;
3871     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3872     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3873     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3874     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3875     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3876     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3877     __put_user(host_seminfo->semume, &target_seminfo->semume);
3878     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3879     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3880     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3881     unlock_user_struct(target_seminfo, target_addr, 1);
3882     return 0;
3883 }
3884 
3885 union semun {
3886 	int val;
3887 	struct semid_ds *buf;
3888 	unsigned short *array;
3889 	struct seminfo *__buf;
3890 };
3891 
3892 union target_semun {
3893 	int val;
3894 	abi_ulong buf;
3895 	abi_ulong array;
3896 	abi_ulong __buf;
3897 };
3898 
3899 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3900                                                abi_ulong target_addr)
3901 {
3902     int nsems;
3903     unsigned short *array;
3904     union semun semun;
3905     struct semid_ds semid_ds;
3906     int i, ret;
3907 
3908     semun.buf = &semid_ds;
3909 
3910     ret = semctl(semid, 0, IPC_STAT, semun);
3911     if (ret == -1)
3912         return get_errno(ret);
3913 
3914     nsems = semid_ds.sem_nsems;
3915 
3916     *host_array = g_try_new(unsigned short, nsems);
3917     if (!*host_array) {
3918         return -TARGET_ENOMEM;
3919     }
3920     array = lock_user(VERIFY_READ, target_addr,
3921                       nsems*sizeof(unsigned short), 1);
3922     if (!array) {
3923         g_free(*host_array);
3924         return -TARGET_EFAULT;
3925     }
3926 
3927     for(i=0; i<nsems; i++) {
3928         __get_user((*host_array)[i], &array[i]);
3929     }
3930     unlock_user(array, target_addr, 0);
3931 
3932     return 0;
3933 }
3934 
3935 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3936                                                unsigned short **host_array)
3937 {
3938     int nsems;
3939     unsigned short *array;
3940     union semun semun;
3941     struct semid_ds semid_ds;
3942     int i, ret;
3943 
3944     semun.buf = &semid_ds;
3945 
3946     ret = semctl(semid, 0, IPC_STAT, semun);
3947     if (ret == -1)
3948         return get_errno(ret);
3949 
3950     nsems = semid_ds.sem_nsems;
3951 
3952     array = lock_user(VERIFY_WRITE, target_addr,
3953                       nsems*sizeof(unsigned short), 0);
3954     if (!array)
3955         return -TARGET_EFAULT;
3956 
3957     for(i=0; i<nsems; i++) {
3958         __put_user((*host_array)[i], &array[i]);
3959     }
3960     g_free(*host_array);
3961     unlock_user(array, target_addr, 1);
3962 
3963     return 0;
3964 }
3965 
3966 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3967                                  abi_ulong target_arg)
3968 {
3969     union target_semun target_su = { .buf = target_arg };
3970     union semun arg;
3971     struct semid_ds dsarg;
3972     unsigned short *array = NULL;
3973     struct seminfo seminfo;
3974     abi_long ret = -TARGET_EINVAL;
3975     abi_long err;
3976     cmd &= 0xff;
3977 
3978     switch( cmd ) {
3979 	case GETVAL:
3980 	case SETVAL:
3981             /* In 64 bit cross-endian situations, we will erroneously pick up
3982              * the wrong half of the union for the "val" element.  To rectify
3983              * this, the entire 8-byte structure is byteswapped, followed by
3984 	     * a swap of the 4 byte val field. In other cases, the data is
3985 	     * already in proper host byte order. */
3986 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3987 		target_su.buf = tswapal(target_su.buf);
3988 		arg.val = tswap32(target_su.val);
3989 	    } else {
3990 		arg.val = target_su.val;
3991 	    }
3992             ret = get_errno(semctl(semid, semnum, cmd, arg));
3993             break;
3994 	case GETALL:
3995 	case SETALL:
3996             err = target_to_host_semarray(semid, &array, target_su.array);
3997             if (err)
3998                 return err;
3999             arg.array = array;
4000             ret = get_errno(semctl(semid, semnum, cmd, arg));
4001             err = host_to_target_semarray(semid, target_su.array, &array);
4002             if (err)
4003                 return err;
4004             break;
4005 	case IPC_STAT:
4006 	case IPC_SET:
4007 	case SEM_STAT:
4008             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4009             if (err)
4010                 return err;
4011             arg.buf = &dsarg;
4012             ret = get_errno(semctl(semid, semnum, cmd, arg));
4013             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4014             if (err)
4015                 return err;
4016             break;
4017 	case IPC_INFO:
4018 	case SEM_INFO:
4019             arg.__buf = &seminfo;
4020             ret = get_errno(semctl(semid, semnum, cmd, arg));
4021             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4022             if (err)
4023                 return err;
4024             break;
4025 	case IPC_RMID:
4026 	case GETPID:
4027 	case GETNCNT:
4028 	case GETZCNT:
4029             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4030             break;
4031     }
4032 
4033     return ret;
4034 }
4035 
4036 struct target_sembuf {
4037     unsigned short sem_num;
4038     short sem_op;
4039     short sem_flg;
4040 };
4041 
4042 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4043                                              abi_ulong target_addr,
4044                                              unsigned nsops)
4045 {
4046     struct target_sembuf *target_sembuf;
4047     int i;
4048 
4049     target_sembuf = lock_user(VERIFY_READ, target_addr,
4050                               nsops*sizeof(struct target_sembuf), 1);
4051     if (!target_sembuf)
4052         return -TARGET_EFAULT;
4053 
4054     for(i=0; i<nsops; i++) {
4055         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4056         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4057         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4058     }
4059 
4060     unlock_user(target_sembuf, target_addr, 0);
4061 
4062     return 0;
4063 }
4064 
4065 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4066     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4067 
4068 /*
4069  * This macro is required to handle the s390 variants, which pass the
4070  * arguments in a different order than the default.
4071  */
4072 #ifdef __s390x__
4073 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4074   (__nsops), (__timeout), (__sops)
4075 #else
4076 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4077   (__nsops), 0, (__sops), (__timeout)
4078 #endif
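
/*
 * For reference (added comment): in the safe_ipc(IPCOP_semtimedop, ...)
 * call below, SEMTIMEDOP_IPC_ARGS(nsops, sops, timeout) expands to
 *   s390x:      (nsops), (timeout), (sops)
 *   otherwise:  (nsops), 0, (sops), (timeout)
 * matching the differing kernel sys_ipc argument orders described above.
 */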
4079 
4080 static inline abi_long do_semtimedop(int semid,
4081                                      abi_long ptr,
4082                                      unsigned nsops,
4083                                      abi_long timeout, bool time64)
4084 {
4085     struct sembuf *sops;
4086     struct timespec ts, *pts = NULL;
4087     abi_long ret;
4088 
4089     if (timeout) {
4090         pts = &ts;
4091         if (time64) {
4092             if (target_to_host_timespec64(pts, timeout)) {
4093                 return -TARGET_EFAULT;
4094             }
4095         } else {
4096             if (target_to_host_timespec(pts, timeout)) {
4097                 return -TARGET_EFAULT;
4098             }
4099         }
4100     }
4101 
4102     if (nsops > TARGET_SEMOPM) {
4103         return -TARGET_E2BIG;
4104     }
4105 
4106     sops = g_new(struct sembuf, nsops);
4107 
4108     if (target_to_host_sembuf(sops, ptr, nsops)) {
4109         g_free(sops);
4110         return -TARGET_EFAULT;
4111     }
4112 
4113     ret = -TARGET_ENOSYS;
4114 #ifdef __NR_semtimedop
4115     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4116 #endif
4117 #ifdef __NR_ipc
4118     if (ret == -TARGET_ENOSYS) {
4119         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4120                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4121     }
4122 #endif
4123     g_free(sops);
4124     return ret;
4125 }
4126 #endif
4127 
4128 struct target_msqid_ds
4129 {
4130     struct target_ipc_perm msg_perm;
4131     abi_ulong msg_stime;
4132 #if TARGET_ABI_BITS == 32
4133     abi_ulong __unused1;
4134 #endif
4135     abi_ulong msg_rtime;
4136 #if TARGET_ABI_BITS == 32
4137     abi_ulong __unused2;
4138 #endif
4139     abi_ulong msg_ctime;
4140 #if TARGET_ABI_BITS == 32
4141     abi_ulong __unused3;
4142 #endif
4143     abi_ulong __msg_cbytes;
4144     abi_ulong msg_qnum;
4145     abi_ulong msg_qbytes;
4146     abi_ulong msg_lspid;
4147     abi_ulong msg_lrpid;
4148     abi_ulong __unused4;
4149     abi_ulong __unused5;
4150 };
4151 
4152 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4153                                                abi_ulong target_addr)
4154 {
4155     struct target_msqid_ds *target_md;
4156 
4157     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4158         return -TARGET_EFAULT;
4159     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4160         return -TARGET_EFAULT;
4161     host_md->msg_stime = tswapal(target_md->msg_stime);
4162     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4163     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4164     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4165     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4166     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4167     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4168     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4169     unlock_user_struct(target_md, target_addr, 0);
4170     return 0;
4171 }
4172 
4173 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4174                                                struct msqid_ds *host_md)
4175 {
4176     struct target_msqid_ds *target_md;
4177 
4178     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4179         return -TARGET_EFAULT;
4180     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4181         return -TARGET_EFAULT;
4182     target_md->msg_stime = tswapal(host_md->msg_stime);
4183     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4184     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4185     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4186     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4187     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4188     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4189     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4190     unlock_user_struct(target_md, target_addr, 1);
4191     return 0;
4192 }
4193 
4194 struct target_msginfo {
4195     int msgpool;
4196     int msgmap;
4197     int msgmax;
4198     int msgmnb;
4199     int msgmni;
4200     int msgssz;
4201     int msgtql;
4202     unsigned short int msgseg;
4203 };
4204 
4205 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4206                                               struct msginfo *host_msginfo)
4207 {
4208     struct target_msginfo *target_msginfo;
4209     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4210         return -TARGET_EFAULT;
4211     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4212     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4213     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4214     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4215     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4216     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4217     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4218     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4219     unlock_user_struct(target_msginfo, target_addr, 1);
4220     return 0;
4221 }
4222 
4223 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4224 {
4225     struct msqid_ds dsarg;
4226     struct msginfo msginfo;
4227     abi_long ret = -TARGET_EINVAL;
4228 
4229     cmd &= 0xff;
4230 
4231     switch (cmd) {
4232     case IPC_STAT:
4233     case IPC_SET:
4234     case MSG_STAT:
4235         if (target_to_host_msqid_ds(&dsarg,ptr))
4236             return -TARGET_EFAULT;
4237         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4238         if (host_to_target_msqid_ds(ptr,&dsarg))
4239             return -TARGET_EFAULT;
4240         break;
4241     case IPC_RMID:
4242         ret = get_errno(msgctl(msgid, cmd, NULL));
4243         break;
4244     case IPC_INFO:
4245     case MSG_INFO:
4246         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4247         if (host_to_target_msginfo(ptr, &msginfo))
4248             return -TARGET_EFAULT;
4249         break;
4250     }
4251 
4252     return ret;
4253 }
4254 
4255 struct target_msgbuf {
4256     abi_long mtype;
4257     char	mtext[1];
4258 };
4259 
4260 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4261                                  ssize_t msgsz, int msgflg)
4262 {
4263     struct target_msgbuf *target_mb;
4264     struct msgbuf *host_mb;
4265     abi_long ret = 0;
4266 
4267     if (msgsz < 0) {
4268         return -TARGET_EINVAL;
4269     }
4270 
4271     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4272         return -TARGET_EFAULT;
4273     host_mb = g_try_malloc(msgsz + sizeof(long));
4274     if (!host_mb) {
4275         unlock_user_struct(target_mb, msgp, 0);
4276         return -TARGET_ENOMEM;
4277     }
4278     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4279     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4280     ret = -TARGET_ENOSYS;
4281 #ifdef __NR_msgsnd
4282     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4283 #endif
4284 #ifdef __NR_ipc
4285     if (ret == -TARGET_ENOSYS) {
4286 #ifdef __s390x__
4287         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4288                                  host_mb));
4289 #else
4290         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4291                                  host_mb, 0));
4292 #endif
4293     }
4294 #endif
4295     g_free(host_mb);
4296     unlock_user_struct(target_mb, msgp, 0);
4297 
4298     return ret;
4299 }
4300 
4301 #ifdef __NR_ipc
4302 #if defined(__sparc__)
4303 /* For msgrcv, SPARC does not use the kludge on the final 2 arguments.  */
4304 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4305 #elif defined(__s390x__)
4306 /* The s390 sys_ipc variant has only five parameters.  */
4307 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4308     ((long int[]){(long int)__msgp, __msgtyp})
4309 #else
4310 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4311     ((long int[]){(long int)__msgp, __msgtyp}), 0
4312 #endif
4313 #endif
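
/*
 * For reference (added comment): MSGRCV_ARGS(msgp, msgtyp) supplies the
 * trailing safe_ipc() arguments used below:
 *   sparc:      msgp, msgtyp                          (no kludge struct)
 *   s390x:      a two-element long array { msgp, msgtyp }
 *   otherwise:  a two-element long array { msgp, msgtyp }, followed by 0
 */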
4314 
4315 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4316                                  ssize_t msgsz, abi_long msgtyp,
4317                                  int msgflg)
4318 {
4319     struct target_msgbuf *target_mb;
4320     char *target_mtext;
4321     struct msgbuf *host_mb;
4322     abi_long ret = 0;
4323 
4324     if (msgsz < 0) {
4325         return -TARGET_EINVAL;
4326     }
4327 
4328     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4329         return -TARGET_EFAULT;
4330 
4331     host_mb = g_try_malloc(msgsz + sizeof(long));
4332     if (!host_mb) {
4333         ret = -TARGET_ENOMEM;
4334         goto end;
4335     }
4336     ret = -TARGET_ENOSYS;
4337 #ifdef __NR_msgrcv
4338     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4339 #endif
4340 #ifdef __NR_ipc
4341     if (ret == -TARGET_ENOSYS) {
4342         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4343                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4344     }
4345 #endif
4346 
4347     if (ret > 0) {
4348         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4349         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4350         if (!target_mtext) {
4351             ret = -TARGET_EFAULT;
4352             goto end;
4353         }
4354         memcpy(target_mb->mtext, host_mb->mtext, ret);
4355         unlock_user(target_mtext, target_mtext_addr, ret);
4356     }
4357 
4358     target_mb->mtype = tswapal(host_mb->mtype);
4359 
4360 end:
4361     if (target_mb)
4362         unlock_user_struct(target_mb, msgp, 1);
4363     g_free(host_mb);
4364     return ret;
4365 }
4366 
4367 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4368                                                abi_ulong target_addr)
4369 {
4370     struct target_shmid_ds *target_sd;
4371 
4372     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4373         return -TARGET_EFAULT;
4374     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4375         return -TARGET_EFAULT;
4376     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4377     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4378     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4379     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4380     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4381     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4382     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4383     unlock_user_struct(target_sd, target_addr, 0);
4384     return 0;
4385 }
4386 
4387 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4388                                                struct shmid_ds *host_sd)
4389 {
4390     struct target_shmid_ds *target_sd;
4391 
4392     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4393         return -TARGET_EFAULT;
4394     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4395         return -TARGET_EFAULT;
4396     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4397     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4398     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4399     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4400     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4401     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4402     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4403     unlock_user_struct(target_sd, target_addr, 1);
4404     return 0;
4405 }
4406 
4407 struct  target_shminfo {
4408     abi_ulong shmmax;
4409     abi_ulong shmmin;
4410     abi_ulong shmmni;
4411     abi_ulong shmseg;
4412     abi_ulong shmall;
4413 };
4414 
4415 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4416                                               struct shminfo *host_shminfo)
4417 {
4418     struct target_shminfo *target_shminfo;
4419     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4420         return -TARGET_EFAULT;
4421     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4422     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4423     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4424     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4425     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4426     unlock_user_struct(target_shminfo, target_addr, 1);
4427     return 0;
4428 }
4429 
4430 struct target_shm_info {
4431     int used_ids;
4432     abi_ulong shm_tot;
4433     abi_ulong shm_rss;
4434     abi_ulong shm_swp;
4435     abi_ulong swap_attempts;
4436     abi_ulong swap_successes;
4437 };
4438 
4439 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4440                                                struct shm_info *host_shm_info)
4441 {
4442     struct target_shm_info *target_shm_info;
4443     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4444         return -TARGET_EFAULT;
4445     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4446     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4447     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4448     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4449     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4450     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4451     unlock_user_struct(target_shm_info, target_addr, 1);
4452     return 0;
4453 }
4454 
4455 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4456 {
4457     struct shmid_ds dsarg;
4458     struct shminfo shminfo;
4459     struct shm_info shm_info;
4460     abi_long ret = -TARGET_EINVAL;
4461 
4462     cmd &= 0xff;
4463 
4464     switch(cmd) {
4465     case IPC_STAT:
4466     case IPC_SET:
4467     case SHM_STAT:
4468         if (target_to_host_shmid_ds(&dsarg, buf))
4469             return -TARGET_EFAULT;
4470         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4471         if (host_to_target_shmid_ds(buf, &dsarg))
4472             return -TARGET_EFAULT;
4473         break;
4474     case IPC_INFO:
4475         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4476         if (host_to_target_shminfo(buf, &shminfo))
4477             return -TARGET_EFAULT;
4478         break;
4479     case SHM_INFO:
4480         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4481         if (host_to_target_shm_info(buf, &shm_info))
4482             return -TARGET_EFAULT;
4483         break;
4484     case IPC_RMID:
4485     case SHM_LOCK:
4486     case SHM_UNLOCK:
4487         ret = get_errno(shmctl(shmid, cmd, NULL));
4488         break;
4489     }
4490 
4491     return ret;
4492 }
4493 
4494 #ifndef TARGET_FORCE_SHMLBA
4495 /* For most architectures, SHMLBA is the same as the page size;
4496  * some architectures have larger values, in which case they should
4497  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4498  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4499  * and defining its own value for SHMLBA.
4500  *
4501  * The kernel also permits SHMLBA to be set by the architecture to a
4502  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4503  * this means that addresses are rounded to the large size if
4504  * SHM_RND is set but addresses not aligned to that size are not rejected
4505  * as long as they are at least page-aligned. Since the only architecture
4506  * which uses this is ia64, this code doesn't provide for that oddity.
4507  */
4508 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4509 {
4510     return TARGET_PAGE_SIZE;
4511 }
4512 #endif
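
/*
 * Sketch of the alternative described above (added, hypothetical values):
 * an architecture whose SHMLBA exceeds the page size would define
 * TARGET_FORCE_SHMLBA in its target headers and provide its own helper,
 * for example:
 */
#if 0
#define TARGET_FORCE_SHMLBA 1

static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return 4 * TARGET_PAGE_SIZE;    /* illustrative value only */
}
#endif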
4513 
4514 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4515                                  int shmid, abi_ulong shmaddr, int shmflg)
4516 {
4517     CPUState *cpu = env_cpu(cpu_env);
4518     abi_long raddr;
4519     void *host_raddr;
4520     struct shmid_ds shm_info;
4521     int i, ret;
4522     abi_ulong shmlba;
4523 
4524     /* shmat pointers are always untagged */
4525 
4526     /* find out the length of the shared memory segment */
4527     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4528     if (is_error(ret)) {
4529         /* can't get length, bail out */
4530         return ret;
4531     }
4532 
4533     shmlba = target_shmlba(cpu_env);
4534 
4535     if (shmaddr & (shmlba - 1)) {
4536         if (shmflg & SHM_RND) {
4537             shmaddr &= ~(shmlba - 1);
4538         } else {
4539             return -TARGET_EINVAL;
4540         }
4541     }
4542     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4543         return -TARGET_EINVAL;
4544     }
4545 
4546     mmap_lock();
4547 
4548     /*
4549      * We're mapping shared memory, so ensure we generate code for parallel
4550      * execution and flush old translations.  This will work up to the level
4551      * supported by the host -- anything that requires EXCP_ATOMIC will not
4552      * be atomic with respect to an external process.
4553      */
4554     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4555         cpu->tcg_cflags |= CF_PARALLEL;
4556         tb_flush(cpu);
4557     }
4558 
4559     if (shmaddr)
4560         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4561     else {
4562         abi_ulong mmap_start;
4563 
4564         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4565         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4566 
4567         if (mmap_start == -1) {
4568             errno = ENOMEM;
4569             host_raddr = (void *)-1;
4570         } else
4571             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4572                                shmflg | SHM_REMAP);
4573     }
4574 
4575     if (host_raddr == (void *)-1) {
4576         mmap_unlock();
4577         return get_errno((long)host_raddr);
4578     }
4579     raddr = h2g((unsigned long)host_raddr);
4580 
4581     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4582                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4583                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4584 
4585     for (i = 0; i < N_SHM_REGIONS; i++) {
4586         if (!shm_regions[i].in_use) {
4587             shm_regions[i].in_use = true;
4588             shm_regions[i].start = raddr;
4589             shm_regions[i].size = shm_info.shm_segsz;
4590             break;
4591         }
4592     }
4593 
4594     mmap_unlock();
4595     return raddr;
4596 
4597 }
4598 
4599 static inline abi_long do_shmdt(abi_ulong shmaddr)
4600 {
4601     int i;
4602     abi_long rv;
4603 
4604     /* shmdt pointers are always untagged */
4605 
4606     mmap_lock();
4607 
4608     for (i = 0; i < N_SHM_REGIONS; ++i) {
4609         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4610             shm_regions[i].in_use = false;
4611             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4612             break;
4613         }
4614     }
4615     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4616 
4617     mmap_unlock();
4618 
4619     return rv;
4620 }
4621 
4622 #ifdef TARGET_NR_ipc
4623 /* ??? This only works with linear mappings.  */
4624 /* do_ipc() must return target values and target errnos. */
4625 static abi_long do_ipc(CPUArchState *cpu_env,
4626                        unsigned int call, abi_long first,
4627                        abi_long second, abi_long third,
4628                        abi_long ptr, abi_long fifth)
4629 {
4630     int version;
4631     abi_long ret = 0;
4632 
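    /*
     * Note (added for clarity): the guest packs an IPC "version" into bits
     * 16-31 of the call number and the IPCOP_* operation into bits 0-15,
     * so split them here before dispatching.
     */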
4633     version = call >> 16;
4634     call &= 0xffff;
4635 
4636     switch (call) {
4637     case IPCOP_semop:
4638         ret = do_semtimedop(first, ptr, second, 0, false);
4639         break;
4640     case IPCOP_semtimedop:
4641     /*
4642      * The s390 sys_ipc variant has only five parameters instead of six
4643      * (as in the default variant); the only difference is the handling of
4644      * SEMTIMEDOP, where s390 uses the third parameter as a pointer to a
4645      * struct timespec while the generic variant uses the fifth parameter.
4646      */
4647 #if defined(TARGET_S390X)
4648         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4649 #else
4650         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4651 #endif
4652         break;
4653 
4654     case IPCOP_semget:
4655         ret = get_errno(semget(first, second, third));
4656         break;
4657 
4658     case IPCOP_semctl: {
4659         /* The semun argument to semctl is passed by value, so dereference the
4660          * ptr argument. */
4661         abi_ulong atptr;
4662         get_user_ual(atptr, ptr);
4663         ret = do_semctl(first, second, third, atptr);
4664         break;
4665     }
4666 
4667     case IPCOP_msgget:
4668         ret = get_errno(msgget(first, second));
4669         break;
4670 
4671     case IPCOP_msgsnd:
4672         ret = do_msgsnd(first, ptr, second, third);
4673         break;
4674 
4675     case IPCOP_msgctl:
4676         ret = do_msgctl(first, second, ptr);
4677         break;
4678 
4679     case IPCOP_msgrcv:
4680         switch (version) {
4681         case 0:
4682             {
4683                 struct target_ipc_kludge {
4684                     abi_long msgp;
4685                     abi_long msgtyp;
4686                 } *tmp;
4687 
4688                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4689                     ret = -TARGET_EFAULT;
4690                     break;
4691                 }
4692 
4693                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4694 
4695                 unlock_user_struct(tmp, ptr, 0);
4696                 break;
4697             }
4698         default:
4699             ret = do_msgrcv(first, ptr, second, fifth, third);
4700         }
4701         break;
4702 
4703     case IPCOP_shmat:
4704         switch (version) {
4705         default:
4706         {
4707             abi_ulong raddr;
4708             raddr = do_shmat(cpu_env, first, ptr, second);
4709             if (is_error(raddr))
4710                 return get_errno(raddr);
4711             if (put_user_ual(raddr, third))
4712                 return -TARGET_EFAULT;
4713             break;
4714         }
4715         case 1:
4716             ret = -TARGET_EINVAL;
4717             break;
4718         }
4719         break;
4720     case IPCOP_shmdt:
4721         ret = do_shmdt(ptr);
4722         break;
4723 
4724     case IPCOP_shmget:
4725         /* IPC_* flag values are the same on all Linux platforms */
4726         ret = get_errno(shmget(first, second, third));
4727         break;
4728 
4729     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4730     case IPCOP_shmctl:
4731         ret = do_shmctl(first, second, ptr);
4732         break;
4733     default:
4734         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4735                       call, version);
4736         ret = -TARGET_ENOSYS;
4737         break;
4738     }
4739     return ret;
4740 }
4741 #endif
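
/*
 * Illustrative sketch (not QEMU code): a guest libc that still funnels SysV
 * IPC through the single ipc(2) entry point packs the operation into the low
 * 16 bits of 'call' and an ABI version into the high 16 bits, which is what
 * do_ipc() above strips apart.  A shmat request might look roughly like:
 *
 *     unsigned long raddr;
 *     syscall(__NR_ipc, SHMAT, shmid, shmflg, &raddr, shmaddr);
 *     // SHMAT is the kernel's name for IPCOP_shmat; on success the attach
 *     // address has been stored through &raddr
 */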
4742 
4743 /* kernel structure types definitions */
4744 
4745 #define STRUCT(name, ...) STRUCT_ ## name,
4746 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4747 enum {
4748 #include "syscall_types.h"
4749 STRUCT_MAX
4750 };
4751 #undef STRUCT
4752 #undef STRUCT_SPECIAL
4753 
4754 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4755 #define STRUCT_SPECIAL(name)
4756 #include "syscall_types.h"
4757 #undef STRUCT
4758 #undef STRUCT_SPECIAL
4759 
4760 #define MAX_STRUCT_SIZE 4096
4761 
4762 #ifdef CONFIG_FIEMAP
4763 /* So fiemap access checks don't overflow on 32 bit systems.
4764  * This is very slightly smaller than the limit imposed by
4765  * the underlying kernel.
4766  */
4767 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4768                             / sizeof(struct fiemap_extent))
4769 
4770 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4771                                        int fd, int cmd, abi_long arg)
4772 {
4773     /* The parameter for this ioctl is a struct fiemap followed
4774      * by an array of struct fiemap_extent whose size is set
4775      * in fiemap->fm_extent_count. The array is filled in by the
4776      * ioctl.
4777      */
4778     int target_size_in, target_size_out;
4779     struct fiemap *fm;
4780     const argtype *arg_type = ie->arg_type;
4781     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4782     void *argptr, *p;
4783     abi_long ret;
4784     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4785     uint32_t outbufsz;
4786     int free_fm = 0;
4787 
4788     assert(arg_type[0] == TYPE_PTR);
4789     assert(ie->access == IOC_RW);
4790     arg_type++;
4791     target_size_in = thunk_type_size(arg_type, 0);
4792     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4793     if (!argptr) {
4794         return -TARGET_EFAULT;
4795     }
4796     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4797     unlock_user(argptr, arg, 0);
4798     fm = (struct fiemap *)buf_temp;
4799     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4800         return -TARGET_EINVAL;
4801     }
4802 
4803     outbufsz = sizeof (*fm) +
4804         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4805 
4806     if (outbufsz > MAX_STRUCT_SIZE) {
4807         /* We can't fit all the extents into the fixed size buffer.
4808          * Allocate one that is large enough and use it instead.
4809          */
4810         fm = g_try_malloc(outbufsz);
4811         if (!fm) {
4812             return -TARGET_ENOMEM;
4813         }
4814         memcpy(fm, buf_temp, sizeof(struct fiemap));
4815         free_fm = 1;
4816     }
4817     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4818     if (!is_error(ret)) {
4819         target_size_out = target_size_in;
4820         /* An extent_count of 0 means we were only counting the extents
4821          * so there are no structs to copy
4822          */
4823         if (fm->fm_extent_count != 0) {
4824             target_size_out += fm->fm_mapped_extents * extent_size;
4825         }
4826         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4827         if (!argptr) {
4828             ret = -TARGET_EFAULT;
4829         } else {
4830             /* Convert the struct fiemap */
4831             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4832             if (fm->fm_extent_count != 0) {
4833                 p = argptr + target_size_in;
4834                 /* ...and then all the struct fiemap_extents */
4835                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4836                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4837                                   THUNK_TARGET);
4838                     p += extent_size;
4839                 }
4840             }
4841             unlock_user(argptr, arg, target_size_out);
4842         }
4843     }
4844     if (free_fm) {
4845         g_free(fm);
4846     }
4847     return ret;
4848 }
4849 #endif
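
/*
 * Guest-side usage the fiemap handler above has to cope with, as a hedged
 * sketch (n and fd are placeholders): the caller sizes one allocation for
 * the header plus fm_extent_count extents and the kernel fills in the tail
 * array.
 *
 *     size_t sz = sizeof(struct fiemap) + n * sizeof(struct fiemap_extent);
 *     struct fiemap *fm = calloc(1, sz);
 *     fm->fm_length = ~0ULL;         // map the whole file
 *     fm->fm_extent_count = n;       // 0 would mean "only count the extents"
 *     ioctl(fd, FS_IOC_FIEMAP, fm);  // fm_mapped_extents extents come back
 */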
4850 
4851 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4852                                 int fd, int cmd, abi_long arg)
4853 {
4854     const argtype *arg_type = ie->arg_type;
4855     int target_size;
4856     void *argptr;
4857     int ret;
4858     struct ifconf *host_ifconf;
4859     uint32_t outbufsz;
4860     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4861     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4862     int target_ifreq_size;
4863     int nb_ifreq;
4864     int free_buf = 0;
4865     int i;
4866     int target_ifc_len;
4867     abi_long target_ifc_buf;
4868     int host_ifc_len;
4869     char *host_ifc_buf;
4870 
4871     assert(arg_type[0] == TYPE_PTR);
4872     assert(ie->access == IOC_RW);
4873 
4874     arg_type++;
4875     target_size = thunk_type_size(arg_type, 0);
4876 
4877     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4878     if (!argptr)
4879         return -TARGET_EFAULT;
4880     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4881     unlock_user(argptr, arg, 0);
4882 
4883     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4884     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4885     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4886 
4887     if (target_ifc_buf != 0) {
4888         target_ifc_len = host_ifconf->ifc_len;
4889         nb_ifreq = target_ifc_len / target_ifreq_size;
4890         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4891 
4892         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4893         if (outbufsz > MAX_STRUCT_SIZE) {
4894             /*
4895              * We can't fit all the ifreq entries into the fixed-size buffer.
4896              * Allocate one that is large enough and use it instead.
4897              */
4898             host_ifconf = g_try_malloc(outbufsz);
4899             if (!host_ifconf) {
4900                 return -TARGET_ENOMEM;
4901             }
4902             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4903             free_buf = 1;
4904         }
4905         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4906 
4907         host_ifconf->ifc_len = host_ifc_len;
4908     } else {
4909         host_ifc_buf = NULL;
4910     }
4911     host_ifconf->ifc_buf = host_ifc_buf;
4912 
4913     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4914     if (!is_error(ret)) {
4915         /* convert host ifc_len to target ifc_len */
4916 
4917         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4918         target_ifc_len = nb_ifreq * target_ifreq_size;
4919         host_ifconf->ifc_len = target_ifc_len;
4920 
4921         /* restore target ifc_buf */
4922 
4923         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4924 
4925         /* copy struct ifconf to target user */
4926 
4927         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4928         if (!argptr)
4929             return -TARGET_EFAULT;
4930         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4931         unlock_user(argptr, arg, target_size);
4932 
4933         if (target_ifc_buf != 0) {
4934             /* copy ifreq[] to target user */
4935             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4936             for (i = 0; i < nb_ifreq ; i++) {
4937                 thunk_convert(argptr + i * target_ifreq_size,
4938                               host_ifc_buf + i * sizeof(struct ifreq),
4939                               ifreq_arg_type, THUNK_TARGET);
4940             }
4941             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4942         }
4943     }
4944 
4945     if (free_buf) {
4946         g_free(host_ifconf);
4947     }
4948 
4949     return ret;
4950 }
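
/*
 * Typical guest-side SIOCGIFCONF pattern handled above, as a sketch (sock is
 * a placeholder socket fd): a NULL ifc_buf only queries the required length,
 * a non-NULL buffer receives the packed struct ifreq array.
 *
 *     struct ifconf ifc = { 0 };           // ifc_buf == NULL: size probe
 *     ioctl(sock, SIOCGIFCONF, &ifc);      // kernel sets ifc.ifc_len
 *     ifc.ifc_buf = malloc(ifc.ifc_len);
 *     ioctl(sock, SIOCGIFCONF, &ifc);      // fills the struct ifreq array
 */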
4951 
4952 #if defined(CONFIG_USBFS)
4953 #if HOST_LONG_BITS > 64
4954 #error USBDEVFS thunks do not support >64 bit hosts yet.
4955 #endif
4956 struct live_urb {
4957     uint64_t target_urb_adr;
4958     uint64_t target_buf_adr;
4959     char *target_buf_ptr;
4960     struct usbdevfs_urb host_urb;
4961 };
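
/*
 * The URB hashtable below is created with g_int64_hash/g_int64_equal and is
 * keyed directly by the struct live_urb pointer, so the hash and comparison
 * read the first 8 bytes of the struct.  target_urb_adr therefore has to
 * remain the first member, and urb_hashtable_lookup() can look an entry up
 * by passing a pointer to a plain 64-bit guest address.
 */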
4962 
4963 static GHashTable *usbdevfs_urb_hashtable(void)
4964 {
4965     static GHashTable *urb_hashtable;
4966 
4967     if (!urb_hashtable) {
4968         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4969     }
4970     return urb_hashtable;
4971 }
4972 
4973 static void urb_hashtable_insert(struct live_urb *urb)
4974 {
4975     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4976     g_hash_table_insert(urb_hashtable, urb, urb);
4977 }
4978 
4979 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4980 {
4981     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4982     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4983 }
4984 
4985 static void urb_hashtable_remove(struct live_urb *urb)
4986 {
4987     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4988     g_hash_table_remove(urb_hashtable, urb);
4989 }
4990 
4991 static abi_long
4992 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4993                           int fd, int cmd, abi_long arg)
4994 {
4995     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4996     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4997     struct live_urb *lurb;
4998     void *argptr;
4999     uint64_t hurb;
5000     int target_size;
5001     uintptr_t target_urb_adr;
5002     abi_long ret;
5003 
5004     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5005 
5006     memset(buf_temp, 0, sizeof(uint64_t));
5007     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5008     if (is_error(ret)) {
5009         return ret;
5010     }
5011 
5012     memcpy(&hurb, buf_temp, sizeof(uint64_t));
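    /*
     * The kernel hands back the host_urb pointer that was submitted; step
     * back by offsetof(struct live_urb, host_urb) (a container_of in
     * disguise) to recover the live_urb with the stashed guest addresses.
     */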
5013     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5014     if (!lurb->target_urb_adr) {
5015         return -TARGET_EFAULT;
5016     }
5017     urb_hashtable_remove(lurb);
5018     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5019         lurb->host_urb.buffer_length);
5020     lurb->target_buf_ptr = NULL;
5021 
5022     /* restore the guest buffer pointer */
5023     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5024 
5025     /* update the guest urb struct */
5026     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5027     if (!argptr) {
5028         g_free(lurb);
5029         return -TARGET_EFAULT;
5030     }
5031     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5032     unlock_user(argptr, lurb->target_urb_adr, target_size);
5033 
5034     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5035     /* write back the urb handle */
5036     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5037     if (!argptr) {
5038         g_free(lurb);
5039         return -TARGET_EFAULT;
5040     }
5041 
5042     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5043     target_urb_adr = lurb->target_urb_adr;
5044     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5045     unlock_user(argptr, arg, target_size);
5046 
5047     g_free(lurb);
5048     return ret;
5049 }
5050 
5051 static abi_long
5052 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5053                              uint8_t *buf_temp __attribute__((unused)),
5054                              int fd, int cmd, abi_long arg)
5055 {
5056     struct live_urb *lurb;
5057 
5058     /* map target address back to host URB with metadata. */
5059     lurb = urb_hashtable_lookup(arg);
5060     if (!lurb) {
5061         return -TARGET_EFAULT;
5062     }
5063     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5064 }
5065 
5066 static abi_long
5067 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5068                             int fd, int cmd, abi_long arg)
5069 {
5070     const argtype *arg_type = ie->arg_type;
5071     int target_size;
5072     abi_long ret;
5073     void *argptr;
5074     int rw_dir;
5075     struct live_urb *lurb;
5076 
5077     /*
5078      * Each submitted URB needs to map to a unique ID for the
5079      * kernel, and that unique ID needs to be a pointer to
5080      * host memory.  Hence we allocate a separate live_urb for each
5081      * URB; isochronous transfers have a variable-length struct.
5082      */
5083     arg_type++;
5084     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5085 
5086     /* construct host copy of urb and metadata */
5087     lurb = g_try_new0(struct live_urb, 1);
5088     if (!lurb) {
5089         return -TARGET_ENOMEM;
5090     }
5091 
5092     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5093     if (!argptr) {
5094         g_free(lurb);
5095         return -TARGET_EFAULT;
5096     }
5097     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5098     unlock_user(argptr, arg, 0);
5099 
5100     lurb->target_urb_adr = arg;
5101     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5102 
5103     /* buffer space used depends on endpoint type so lock the entire buffer */
5104     /* control type urbs should check the buffer contents for true direction */
5105     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5106     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5107         lurb->host_urb.buffer_length, 1);
5108     if (lurb->target_buf_ptr == NULL) {
5109         g_free(lurb);
5110         return -TARGET_EFAULT;
5111     }
5112 
5113     /* update buffer pointer in host copy */
5114     lurb->host_urb.buffer = lurb->target_buf_ptr;
5115 
5116     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5117     if (is_error(ret)) {
5118         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5119         g_free(lurb);
5120     } else {
5121         urb_hashtable_insert(lurb);
5122     }
5123 
5124     return ret;
5125 }
5126 #endif /* CONFIG_USBFS */
5127 
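/*
 * Device-mapper ioctls all take a struct dm_ioctl header that is immediately
 * followed, in the same user buffer, by a command-specific payload:
 * data_start is the offset of that payload from the start of the header and
 * data_size is the size of the whole buffer.  The handler below converts the
 * header with the generic thunk machinery and then hand-converts the payload
 * for each supported command.
 */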
5128 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5129                             int cmd, abi_long arg)
5130 {
5131     void *argptr;
5132     struct dm_ioctl *host_dm;
5133     abi_long guest_data;
5134     uint32_t guest_data_size;
5135     int target_size;
5136     const argtype *arg_type = ie->arg_type;
5137     abi_long ret;
5138     void *big_buf = NULL;
5139     char *host_data;
5140 
5141     arg_type++;
5142     target_size = thunk_type_size(arg_type, 0);
5143     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5144     if (!argptr) {
5145         ret = -TARGET_EFAULT;
5146         goto out;
5147     }
5148     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5149     unlock_user(argptr, arg, 0);
5150 
5151     /* buf_temp is too small, so fetch things into a bigger buffer */
5152     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5153     memcpy(big_buf, buf_temp, target_size);
5154     buf_temp = big_buf;
5155     host_dm = big_buf;
5156 
5157     guest_data = arg + host_dm->data_start;
5158     if ((guest_data - arg) < 0) {
5159         ret = -TARGET_EINVAL;
5160         goto out;
5161     }
5162     guest_data_size = host_dm->data_size - host_dm->data_start;
5163     host_data = (char*)host_dm + host_dm->data_start;
5164 
5165     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5166     if (!argptr) {
5167         ret = -TARGET_EFAULT;
5168         goto out;
5169     }
5170 
5171     switch (ie->host_cmd) {
5172     case DM_REMOVE_ALL:
5173     case DM_LIST_DEVICES:
5174     case DM_DEV_CREATE:
5175     case DM_DEV_REMOVE:
5176     case DM_DEV_SUSPEND:
5177     case DM_DEV_STATUS:
5178     case DM_DEV_WAIT:
5179     case DM_TABLE_STATUS:
5180     case DM_TABLE_CLEAR:
5181     case DM_TABLE_DEPS:
5182     case DM_LIST_VERSIONS:
5183         /* no input data */
5184         break;
5185     case DM_DEV_RENAME:
5186     case DM_DEV_SET_GEOMETRY:
5187         /* data contains only strings */
5188         memcpy(host_data, argptr, guest_data_size);
5189         break;
5190     case DM_TARGET_MSG:
5191         memcpy(host_data, argptr, guest_data_size);
5192         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5193         break;
5194     case DM_TABLE_LOAD:
5195     {
5196         void *gspec = argptr;
5197         void *cur_data = host_data;
5198         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5199         int spec_size = thunk_type_size(arg_type, 0);
5200         int i;
5201 
5202         for (i = 0; i < host_dm->target_count; i++) {
5203             struct dm_target_spec *spec = cur_data;
5204             uint32_t next;
5205             int slen;
5206 
5207             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5208             slen = strlen((char*)gspec + spec_size) + 1;
5209             next = spec->next;
5210             spec->next = sizeof(*spec) + slen;
5211             strcpy((char*)&spec[1], gspec + spec_size);
5212             gspec += next;
5213             cur_data += spec->next;
5214         }
5215         break;
5216     }
5217     default:
5218         ret = -TARGET_EINVAL;
5219         unlock_user(argptr, guest_data, 0);
5220         goto out;
5221     }
5222     unlock_user(argptr, guest_data, 0);
5223 
5224     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5225     if (!is_error(ret)) {
5226         guest_data = arg + host_dm->data_start;
5227         guest_data_size = host_dm->data_size - host_dm->data_start;
5228         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5229         switch (ie->host_cmd) {
5230         case DM_REMOVE_ALL:
5231         case DM_DEV_CREATE:
5232         case DM_DEV_REMOVE:
5233         case DM_DEV_RENAME:
5234         case DM_DEV_SUSPEND:
5235         case DM_DEV_STATUS:
5236         case DM_TABLE_LOAD:
5237         case DM_TABLE_CLEAR:
5238         case DM_TARGET_MSG:
5239         case DM_DEV_SET_GEOMETRY:
5240             /* no return data */
5241             break;
5242         case DM_LIST_DEVICES:
5243         {
5244             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5245             uint32_t remaining_data = guest_data_size;
5246             void *cur_data = argptr;
5247             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5248             int nl_size = 12; /* can't use thunk_size due to alignment */
5249             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5250             while (1) {
5251                 uint32_t next = nl->next;
5252                 if (next) {
5253                     nl->next = nl_size + (strlen(nl->name) + 1);
5254                 }
5255                 if (remaining_data < nl->next) {
5256                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5257                     break;
5258                 }
5259                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5260                 strcpy(cur_data + nl_size, nl->name);
5261                 cur_data += nl->next;
5262                 remaining_data -= nl->next;
5263                 if (!next) {
5264                     break;
5265                 }
5266                 nl = (void*)nl + next;
5267             }
5268             break;
5269         }
5270         case DM_DEV_WAIT:
5271         case DM_TABLE_STATUS:
5272         {
5273             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5274             void *cur_data = argptr;
5275             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5276             int spec_size = thunk_type_size(arg_type, 0);
5277             int i;
5278 
5279             for (i = 0; i < host_dm->target_count; i++) {
5280                 uint32_t next = spec->next;
5281                 int slen = strlen((char*)&spec[1]) + 1;
5282                 spec->next = (cur_data - argptr) + spec_size + slen;
5283                 if (guest_data_size < spec->next) {
5284                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5285                     break;
5286                 }
5287                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5288                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5289                 cur_data = argptr + spec->next;
5290                 spec = (void*)host_dm + host_dm->data_start + next;
5291             }
5292             break;
5293         }
5294         case DM_TABLE_DEPS:
5295         {
5296             void *hdata = (void*)host_dm + host_dm->data_start;
5297             int count = *(uint32_t*)hdata;
5298             uint64_t *hdev = hdata + 8;
5299             uint64_t *gdev = argptr + 8;
5300             int i;
5301 
5302             *(uint32_t*)argptr = tswap32(count);
5303             for (i = 0; i < count; i++) {
5304                 *gdev = tswap64(*hdev);
5305                 gdev++;
5306                 hdev++;
5307             }
5308             break;
5309         }
5310         case DM_LIST_VERSIONS:
5311         {
5312             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5313             uint32_t remaining_data = guest_data_size;
5314             void *cur_data = argptr;
5315             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5316             int vers_size = thunk_type_size(arg_type, 0);
5317 
5318             while (1) {
5319                 uint32_t next = vers->next;
5320                 if (next) {
5321                     vers->next = vers_size + (strlen(vers->name) + 1);
5322                 }
5323                 if (remaining_data < vers->next) {
5324                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5325                     break;
5326                 }
5327                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5328                 strcpy(cur_data + vers_size, vers->name);
5329                 cur_data += vers->next;
5330                 remaining_data -= vers->next;
5331                 if (!next) {
5332                     break;
5333                 }
5334                 vers = (void*)vers + next;
5335             }
5336             break;
5337         }
5338         default:
5339             unlock_user(argptr, guest_data, 0);
5340             ret = -TARGET_EINVAL;
5341             goto out;
5342         }
5343         unlock_user(argptr, guest_data, guest_data_size);
5344 
5345         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5346         if (!argptr) {
5347             ret = -TARGET_EFAULT;
5348             goto out;
5349         }
5350         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5351         unlock_user(argptr, arg, target_size);
5352     }
5353 out:
5354     g_free(big_buf);
5355     return ret;
5356 }
5357 
5358 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5359                                int cmd, abi_long arg)
5360 {
5361     void *argptr;
5362     int target_size;
5363     const argtype *arg_type = ie->arg_type;
5364     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5365     abi_long ret;
5366 
5367     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5368     struct blkpg_partition host_part;
5369 
5370     /* Read and convert blkpg */
5371     arg_type++;
5372     target_size = thunk_type_size(arg_type, 0);
5373     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5374     if (!argptr) {
5375         ret = -TARGET_EFAULT;
5376         goto out;
5377     }
5378     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5379     unlock_user(argptr, arg, 0);
5380 
5381     switch (host_blkpg->op) {
5382     case BLKPG_ADD_PARTITION:
5383     case BLKPG_DEL_PARTITION:
5384         /* payload is struct blkpg_partition */
5385         break;
5386     default:
5387         /* Unknown opcode */
5388         ret = -TARGET_EINVAL;
5389         goto out;
5390     }
5391 
5392     /* Read and convert blkpg->data */
5393     arg = (abi_long)(uintptr_t)host_blkpg->data;
5394     target_size = thunk_type_size(part_arg_type, 0);
5395     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5396     if (!argptr) {
5397         ret = -TARGET_EFAULT;
5398         goto out;
5399     }
5400     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5401     unlock_user(argptr, arg, 0);
5402 
5403     /* Swizzle the data pointer to our local copy and call! */
5404     host_blkpg->data = &host_part;
5405     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5406 
5407 out:
5408     return ret;
5409 }
5410 
5411 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5412                                 int fd, int cmd, abi_long arg)
5413 {
5414     const argtype *arg_type = ie->arg_type;
5415     const StructEntry *se;
5416     const argtype *field_types;
5417     const int *dst_offsets, *src_offsets;
5418     int target_size;
5419     void *argptr;
5420     abi_ulong *target_rt_dev_ptr = NULL;
5421     unsigned long *host_rt_dev_ptr = NULL;
5422     abi_long ret;
5423     int i;
5424 
5425     assert(ie->access == IOC_W);
5426     assert(*arg_type == TYPE_PTR);
5427     arg_type++;
5428     assert(*arg_type == TYPE_STRUCT);
5429     target_size = thunk_type_size(arg_type, 0);
5430     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5431     if (!argptr) {
5432         return -TARGET_EFAULT;
5433     }
5434     arg_type++;
5435     assert(*arg_type == (int)STRUCT_rtentry);
5436     se = struct_entries + *arg_type++;
5437     assert(se->convert[0] == NULL);
5438     /* convert struct here to be able to catch rt_dev string */
5439     field_types = se->field_types;
5440     dst_offsets = se->field_offsets[THUNK_HOST];
5441     src_offsets = se->field_offsets[THUNK_TARGET];
5442     for (i = 0; i < se->nb_fields; i++) {
5443         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5444             assert(*field_types == TYPE_PTRVOID);
5445             target_rt_dev_ptr = argptr + src_offsets[i];
5446             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5447             if (*target_rt_dev_ptr != 0) {
5448                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5449                                                   tswapal(*target_rt_dev_ptr));
5450                 if (!*host_rt_dev_ptr) {
5451                     unlock_user(argptr, arg, 0);
5452                     return -TARGET_EFAULT;
5453                 }
5454             } else {
5455                 *host_rt_dev_ptr = 0;
5456             }
5457             field_types++;
5458             continue;
5459         }
5460         field_types = thunk_convert(buf_temp + dst_offsets[i],
5461                                     argptr + src_offsets[i],
5462                                     field_types, THUNK_HOST);
5463     }
5464     unlock_user(argptr, arg, 0);
5465 
5466     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5467 
5468     assert(host_rt_dev_ptr != NULL);
5469     assert(target_rt_dev_ptr != NULL);
5470     if (*host_rt_dev_ptr != 0) {
5471         unlock_user((void *)*host_rt_dev_ptr,
5472                     *target_rt_dev_ptr, 0);
5473     }
5474     return ret;
5475 }
5476 
5477 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5478                                      int fd, int cmd, abi_long arg)
5479 {
5480     int sig = target_to_host_signal(arg);
5481     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5482 }
5483 
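/*
 * SIOCGSTAMP and SIOCGSTAMPNS each come in an _OLD flavour that returns the
 * traditional timeval/timespec layout and a newer flavour that uses the
 * 64-bit time layout.  The handlers below always ask the host for the native
 * stamp and convert it to whichever layout the guest command requested.
 */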
5484 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5485                                     int fd, int cmd, abi_long arg)
5486 {
5487     struct timeval tv;
5488     abi_long ret;
5489 
5490     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5491     if (is_error(ret)) {
5492         return ret;
5493     }
5494 
5495     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5496         if (copy_to_user_timeval(arg, &tv)) {
5497             return -TARGET_EFAULT;
5498         }
5499     } else {
5500         if (copy_to_user_timeval64(arg, &tv)) {
5501             return -TARGET_EFAULT;
5502         }
5503     }
5504 
5505     return ret;
5506 }
5507 
5508 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5509                                       int fd, int cmd, abi_long arg)
5510 {
5511     struct timespec ts;
5512     abi_long ret;
5513 
5514     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5515     if (is_error(ret)) {
5516         return ret;
5517     }
5518 
5519     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5520         if (host_to_target_timespec(arg, &ts)) {
5521             return -TARGET_EFAULT;
5522         }
5523     } else {
5524         if (host_to_target_timespec64(arg, &ts)) {
5525             return -TARGET_EFAULT;
5526         }
5527     }
5528 
5529     return ret;
5530 }
5531 
5532 #ifdef TIOCGPTPEER
5533 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5534                                      int fd, int cmd, abi_long arg)
5535 {
5536     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5537     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5538 }
5539 #endif
5540 
5541 #ifdef HAVE_DRM_H
5542 
5543 static void unlock_drm_version(struct drm_version *host_ver,
5544                                struct target_drm_version *target_ver,
5545                                bool copy)
5546 {
5547     unlock_user(host_ver->name, target_ver->name,
5548                                 copy ? host_ver->name_len : 0);
5549     unlock_user(host_ver->date, target_ver->date,
5550                                 copy ? host_ver->date_len : 0);
5551     unlock_user(host_ver->desc, target_ver->desc,
5552                                 copy ? host_ver->desc_len : 0);
5553 }
5554 
5555 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5556                                           struct target_drm_version *target_ver)
5557 {
5558     memset(host_ver, 0, sizeof(*host_ver));
5559 
5560     __get_user(host_ver->name_len, &target_ver->name_len);
5561     if (host_ver->name_len) {
5562         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5563                                    target_ver->name_len, 0);
5564         if (!host_ver->name) {
5565             return -EFAULT;
5566         }
5567     }
5568 
5569     __get_user(host_ver->date_len, &target_ver->date_len);
5570     if (host_ver->date_len) {
5571         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5572                                    target_ver->date_len, 0);
5573         if (!host_ver->date) {
5574             goto err;
5575         }
5576     }
5577 
5578     __get_user(host_ver->desc_len, &target_ver->desc_len);
5579     if (host_ver->desc_len) {
5580         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5581                                    target_ver->desc_len, 0);
5582         if (!host_ver->desc) {
5583             goto err;
5584         }
5585     }
5586 
5587     return 0;
5588 err:
5589     unlock_drm_version(host_ver, target_ver, false);
5590     return -EFAULT;
5591 }
5592 
5593 static inline void host_to_target_drmversion(
5594                                           struct target_drm_version *target_ver,
5595                                           struct drm_version *host_ver)
5596 {
5597     __put_user(host_ver->version_major, &target_ver->version_major);
5598     __put_user(host_ver->version_minor, &target_ver->version_minor);
5599     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5600     __put_user(host_ver->name_len, &target_ver->name_len);
5601     __put_user(host_ver->date_len, &target_ver->date_len);
5602     __put_user(host_ver->desc_len, &target_ver->desc_len);
5603     unlock_drm_version(host_ver, target_ver, true);
5604 }
5605 
5606 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5607                              int fd, int cmd, abi_long arg)
5608 {
5609     struct drm_version *ver;
5610     struct target_drm_version *target_ver;
5611     abi_long ret;
5612 
5613     switch (ie->host_cmd) {
5614     case DRM_IOCTL_VERSION:
5615         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5616             return -TARGET_EFAULT;
5617         }
5618         ver = (struct drm_version *)buf_temp;
5619         ret = target_to_host_drmversion(ver, target_ver);
5620         if (!is_error(ret)) {
5621             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5622             if (is_error(ret)) {
5623                 unlock_drm_version(ver, target_ver, false);
5624             } else {
5625                 host_to_target_drmversion(target_ver, ver);
5626             }
5627         }
5628         unlock_user_struct(target_ver, arg, 0);
5629         return ret;
5630     }
5631     return -TARGET_ENOSYS;
5632 }
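
/*
 * A sketch of the guest-side convention handled above: libdrm-style callers
 * typically issue DRM_IOCTL_VERSION twice, first with zero-length buffers so
 * the kernel reports name_len/date_len/desc_len, then again with buffers of
 * those sizes; this is why the helpers above tolerate NULL name/date/desc
 * pointers when the corresponding length is zero.
 */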
5633 
5634 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5635                                            struct drm_i915_getparam *gparam,
5636                                            int fd, abi_long arg)
5637 {
5638     abi_long ret;
5639     int value;
5640     struct target_drm_i915_getparam *target_gparam;
5641 
5642     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5643         return -TARGET_EFAULT;
5644     }
5645 
5646     __get_user(gparam->param, &target_gparam->param);
5647     gparam->value = &value;
5648     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5649     put_user_s32(value, target_gparam->value);
5650 
5651     unlock_user_struct(target_gparam, arg, 0);
5652     return ret;
5653 }
5654 
5655 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5656                                   int fd, int cmd, abi_long arg)
5657 {
5658     switch (ie->host_cmd) {
5659     case DRM_IOCTL_I915_GETPARAM:
5660         return do_ioctl_drm_i915_getparam(ie,
5661                                           (struct drm_i915_getparam *)buf_temp,
5662                                           fd, arg);
5663     default:
5664         return -TARGET_ENOSYS;
5665     }
5666 }
5667 
5668 #endif
5669 
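/*
 * TUNSETTXFILTER takes a variable-length argument: a struct tun_filter
 * header (flags and address count) immediately followed by 'count' MAC
 * addresses of ETH_ALEN bytes each, which is why the header and the address
 * array are copied in separately below.
 */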
5670 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5671                                         int fd, int cmd, abi_long arg)
5672 {
5673     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5674     struct tun_filter *target_filter;
5675     char *target_addr;
5676 
5677     assert(ie->access == IOC_W);
5678 
5679     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5680     if (!target_filter) {
5681         return -TARGET_EFAULT;
5682     }
5683     filter->flags = tswap16(target_filter->flags);
5684     filter->count = tswap16(target_filter->count);
5685     unlock_user(target_filter, arg, 0);
5686 
5687     if (filter->count) {
5688         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5689             MAX_STRUCT_SIZE) {
5690             return -TARGET_EFAULT;
5691         }
5692 
5693         target_addr = lock_user(VERIFY_READ,
5694                                 arg + offsetof(struct tun_filter, addr),
5695                                 filter->count * ETH_ALEN, 1);
5696         if (!target_addr) {
5697             return -TARGET_EFAULT;
5698         }
5699         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5700         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5701     }
5702 
5703     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5704 }
5705 
5706 IOCTLEntry ioctl_entries[] = {
5707 #define IOCTL(cmd, access, ...) \
5708     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5709 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5710     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5711 #define IOCTL_IGNORE(cmd) \
5712     { TARGET_ ## cmd, 0, #cmd },
5713 #include "ioctls.h"
5714     { 0, 0, },
5715 };
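
/*
 * For illustration (the real entries live in ioctls.h), an entry along the
 * lines of
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * expands to { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 * { MK_PTR(MK_STRUCT(STRUCT_winsize)) } }: target command, host command,
 * name, access mode, no special handler, and the thunk type of the argument
 * that do_ioctl() below uses for its generic read/write conversion.
 */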
5716 
5717 /* ??? Implement proper locking for ioctls.  */
5718 /* do_ioctl() must return target values and target errnos. */
5719 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5720 {
5721     const IOCTLEntry *ie;
5722     const argtype *arg_type;
5723     abi_long ret;
5724     uint8_t buf_temp[MAX_STRUCT_SIZE];
5725     int target_size;
5726     void *argptr;
5727 
5728     ie = ioctl_entries;
5729     for (;;) {
5730         if (ie->target_cmd == 0) {
5731             qemu_log_mask(
5732                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5733             return -TARGET_ENOSYS;
5734         }
5735         if (ie->target_cmd == cmd)
5736             break;
5737         ie++;
5738     }
5739     arg_type = ie->arg_type;
5740     if (ie->do_ioctl) {
5741         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5742     } else if (!ie->host_cmd) {
5743         /* Some architectures define BSD ioctls in their headers
5744            that are not implemented in Linux.  */
5745         return -TARGET_ENOSYS;
5746     }
5747 
5748     switch (arg_type[0]) {
5749     case TYPE_NULL:
5750         /* no argument */
5751         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5752         break;
5753     case TYPE_PTRVOID:
5754     case TYPE_INT:
5755     case TYPE_LONG:
5756     case TYPE_ULONG:
5757         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5758         break;
5759     case TYPE_PTR:
5760         arg_type++;
5761         target_size = thunk_type_size(arg_type, 0);
5762         switch (ie->access) {
5763         case IOC_R:
5764             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5765             if (!is_error(ret)) {
5766                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5767                 if (!argptr)
5768                     return -TARGET_EFAULT;
5769                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5770                 unlock_user(argptr, arg, target_size);
5771             }
5772             break;
5773         case IOC_W:
5774             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5775             if (!argptr)
5776                 return -TARGET_EFAULT;
5777             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5778             unlock_user(argptr, arg, 0);
5779             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5780             break;
5781         default:
5782         case IOC_RW:
5783             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5784             if (!argptr)
5785                 return -TARGET_EFAULT;
5786             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5787             unlock_user(argptr, arg, 0);
5788             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5789             if (!is_error(ret)) {
5790                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5791                 if (!argptr)
5792                     return -TARGET_EFAULT;
5793                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5794                 unlock_user(argptr, arg, target_size);
5795             }
5796             break;
5797         }
5798         break;
5799     default:
5800         qemu_log_mask(LOG_UNIMP,
5801                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5802                       (long)cmd, arg_type[0]);
5803         ret = -TARGET_ENOSYS;
5804         break;
5805     }
5806     return ret;
5807 }
5808 
5809 static const bitmask_transtbl iflag_tbl[] = {
5810         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5811         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5812         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5813         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5814         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5815         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5816         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5817         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5818         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5819         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5820         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5821         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5822         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5823         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5824         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5825         { 0, 0, 0, 0 }
5826 };
5827 
5828 static const bitmask_transtbl oflag_tbl[] = {
5829         { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5830         { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5831         { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5832         { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5833         { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5834         { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5835         { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5836         { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5837         { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5838         { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5839         { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5840         { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5841         { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5842         { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5843         { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5844         { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5845         { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5846         { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5847         { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5848         { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5849         { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5850         { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5851         { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5852         { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5853         { 0, 0, 0, 0 }
5854 };
5855 
5856 static const bitmask_transtbl cflag_tbl[] = {
5857         { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5858         { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5859         { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5860         { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5861         { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5862         { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5863         { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5864         { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5865         { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5866         { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5867         { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5868         { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5869         { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5870         { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5871         { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5872         { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5873         { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5874         { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5875         { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5876         { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5877         { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5878         { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5879         { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5880         { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5881         { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5882         { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5883         { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5884         { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5885         { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5886         { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5887         { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5888         { 0, 0, 0, 0 }
5889 };
5890 
5891 static const bitmask_transtbl lflag_tbl[] = {
5892   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5893   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5894   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5895   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5896   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5897   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5898   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5899   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5900   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5901   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5902   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5903   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5904   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5905   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5906   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5907   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5908   { 0, 0, 0, 0 }
5909 };
5910 
5911 static void target_to_host_termios (void *dst, const void *src)
5912 {
5913     struct host_termios *host = dst;
5914     const struct target_termios *target = src;
5915 
5916     host->c_iflag =
5917         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5918     host->c_oflag =
5919         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5920     host->c_cflag =
5921         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5922     host->c_lflag =
5923         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5924     host->c_line = target->c_line;
5925 
5926     memset(host->c_cc, 0, sizeof(host->c_cc));
5927     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5928     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5929     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5930     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5931     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5932     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5933     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5934     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5935     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5936     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5937     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5938     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5939     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5940     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5941     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5942     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5943     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5944 }
5945 
5946 static void host_to_target_termios (void *dst, const void *src)
5947 {
5948     struct target_termios *target = dst;
5949     const struct host_termios *host = src;
5950 
5951     target->c_iflag =
5952         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5953     target->c_oflag =
5954         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5955     target->c_cflag =
5956         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5957     target->c_lflag =
5958         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5959     target->c_line = host->c_line;
5960 
5961     memset(target->c_cc, 0, sizeof(target->c_cc));
5962     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5963     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5964     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5965     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5966     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5967     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5968     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5969     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5970     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5971     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5972     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5973     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5974     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5975     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5976     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5977     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5978     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5979 }
5980 
5981 static const StructEntry struct_termios_def = {
5982     .convert = { host_to_target_termios, target_to_host_termios },
5983     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5984     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5985     .print = print_termios,
5986 };
5987 
5988 static const bitmask_transtbl mmap_flags_tbl[] = {
5989     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5990     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5991     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5992     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5993       MAP_ANONYMOUS, MAP_ANONYMOUS },
5994     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5995       MAP_GROWSDOWN, MAP_GROWSDOWN },
5996     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5997       MAP_DENYWRITE, MAP_DENYWRITE },
5998     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5999       MAP_EXECUTABLE, MAP_EXECUTABLE },
6000     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6001     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6002       MAP_NORESERVE, MAP_NORESERVE },
6003     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6004     /* MAP_STACK has been ignored by the kernel for quite some time.
6005        Recognize it for the target insofar as we do not want to pass
6006        it through to the host.  */
6007     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6008     { 0, 0, 0, 0 }
6009 };
6010 
6011 /*
6012  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6013  *       TARGET_I386 is also defined if TARGET_X86_64 is defined.
6014  */
6015 #if defined(TARGET_I386)
6016 
6017 /* NOTE: there is really one LDT for all the threads */
6018 static uint8_t *ldt_table;
6019 
6020 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6021 {
6022     int size;
6023     void *p;
6024 
6025     if (!ldt_table)
6026         return 0;
6027     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6028     if (size > bytecount)
6029         size = bytecount;
6030     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6031     if (!p)
6032         return -TARGET_EFAULT;
6033     /* ??? Should this be byteswapped?  */
6034     memcpy(p, ldt_table, size);
6035     unlock_user(p, ptr, size);
6036     return size;
6037 }
6038 
6039 /* XXX: add locking support */
6040 static abi_long write_ldt(CPUX86State *env,
6041                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6042 {
6043     struct target_modify_ldt_ldt_s ldt_info;
6044     struct target_modify_ldt_ldt_s *target_ldt_info;
6045     int seg_32bit, contents, read_exec_only, limit_in_pages;
6046     int seg_not_present, useable, lm;
6047     uint32_t *lp, entry_1, entry_2;
6048 
6049     if (bytecount != sizeof(ldt_info))
6050         return -TARGET_EINVAL;
6051     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6052         return -TARGET_EFAULT;
6053     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6054     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6055     ldt_info.limit = tswap32(target_ldt_info->limit);
6056     ldt_info.flags = tswap32(target_ldt_info->flags);
6057     unlock_user_struct(target_ldt_info, ptr, 0);
6058 
6059     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6060         return -TARGET_EINVAL;
6061     seg_32bit = ldt_info.flags & 1;
6062     contents = (ldt_info.flags >> 1) & 3;
6063     read_exec_only = (ldt_info.flags >> 3) & 1;
6064     limit_in_pages = (ldt_info.flags >> 4) & 1;
6065     seg_not_present = (ldt_info.flags >> 5) & 1;
6066     useable = (ldt_info.flags >> 6) & 1;
6067 #ifdef TARGET_ABI32
6068     lm = 0;
6069 #else
6070     lm = (ldt_info.flags >> 7) & 1;
6071 #endif
6072     if (contents == 3) {
6073         if (oldmode)
6074             return -TARGET_EINVAL;
6075         if (seg_not_present == 0)
6076             return -TARGET_EINVAL;
6077     }
6078     /* allocate the LDT */
6079     if (!ldt_table) {
6080         env->ldt.base = target_mmap(0,
6081                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6082                                     PROT_READ|PROT_WRITE,
6083                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6084         if (env->ldt.base == -1)
6085             return -TARGET_ENOMEM;
6086         memset(g2h_untagged(env->ldt.base), 0,
6087                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6088         env->ldt.limit = 0xffff;
6089         ldt_table = g2h_untagged(env->ldt.base);
6090     }
6091 
6092     /* NOTE: same code as Linux kernel */
6093     /* Allow LDTs to be cleared by the user. */
6094     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6095         if (oldmode ||
6096             (contents == 0             &&
6097              read_exec_only == 1       &&
6098              seg_32bit == 0            &&
6099              limit_in_pages == 0       &&
6100              seg_not_present == 1      &&
6101              useable == 0 )) {
6102             entry_1 = 0;
6103             entry_2 = 0;
6104             goto install;
6105         }
6106     }
6107 
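    /*
     * Pack the fields into the two 32-bit halves of an x86 segment
     * descriptor: entry_1 carries base[15:0] in its upper half and
     * limit[15:0] in its lower half; entry_2 carries base[31:24] and
     * base[23:16], limit[19:16], the type/present bits derived from the
     * flags above, and 0x7000 which sets the S=1/DPL=3 access bits.
     */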
6108     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6109         (ldt_info.limit & 0x0ffff);
6110     entry_2 = (ldt_info.base_addr & 0xff000000) |
6111         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6112         (ldt_info.limit & 0xf0000) |
6113         ((read_exec_only ^ 1) << 9) |
6114         (contents << 10) |
6115         ((seg_not_present ^ 1) << 15) |
6116         (seg_32bit << 22) |
6117         (limit_in_pages << 23) |
6118         (lm << 21) |
6119         0x7000;
6120     if (!oldmode)
6121         entry_2 |= (useable << 20);
6122 
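    /*
     * Purely illustrative encoding (arbitrary values): with
     * base_addr = 0x12345678, limit = 0xfffff and flags = 0x51
     * (seg_32bit, limit_in_pages and useable set, !oldmode), the
     * computation above yields entry_1 = 0x5678ffff and
     * entry_2 = 0x12dff234.
     */
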
6123     /* Install the new entry ...  */
6124 install:
6125     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6126     lp[0] = tswap32(entry_1);
6127     lp[1] = tswap32(entry_2);
6128     return 0;
6129 }
6130 
6131 /* specific and weird i386 syscalls */
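/*
 * modify_ldt(): func 0 reads the LDT, func 1 writes an entry in the legacy
 * format and func 0x11 writes one in the current format; anything else is
 * rejected with -TARGET_ENOSYS, mirroring the kernel interface.
 */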
6132 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6133                               unsigned long bytecount)
6134 {
6135     abi_long ret;
6136 
6137     switch (func) {
6138     case 0:
6139         ret = read_ldt(ptr, bytecount);
6140         break;
6141     case 1:
6142         ret = write_ldt(env, ptr, bytecount, 1);
6143         break;
6144     case 0x11:
6145         ret = write_ldt(env, ptr, bytecount, 0);
6146         break;
6147     default:
6148         ret = -TARGET_ENOSYS;
6149         break;
6150     }
6151     return ret;
6152 }
6153 
6154 #if defined(TARGET_ABI32)
6155 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6156 {
6157     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6158     struct target_modify_ldt_ldt_s ldt_info;
6159     struct target_modify_ldt_ldt_s *target_ldt_info;
6160     int seg_32bit, contents, read_exec_only, limit_in_pages;
6161     int seg_not_present, useable, lm;
6162     uint32_t *lp, entry_1, entry_2;
6163     int i;
6164 
6165     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6166     if (!target_ldt_info)
6167         return -TARGET_EFAULT;
6168     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6169     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6170     ldt_info.limit = tswap32(target_ldt_info->limit);
6171     ldt_info.flags = tswap32(target_ldt_info->flags);
6172     if (ldt_info.entry_number == -1) {
6173         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6174             if (gdt_table[i] == 0) {
6175                 ldt_info.entry_number = i;
6176                 target_ldt_info->entry_number = tswap32(i);
6177                 break;
6178             }
6179         }
6180     }
6181     unlock_user_struct(target_ldt_info, ptr, 1);
6182 
6183     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6184         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6185         return -TARGET_EINVAL;
6186     seg_32bit = ldt_info.flags & 1;
6187     contents = (ldt_info.flags >> 1) & 3;
6188     read_exec_only = (ldt_info.flags >> 3) & 1;
6189     limit_in_pages = (ldt_info.flags >> 4) & 1;
6190     seg_not_present = (ldt_info.flags >> 5) & 1;
6191     useable = (ldt_info.flags >> 6) & 1;
6192 #ifdef TARGET_ABI32
6193     lm = 0;
6194 #else
6195     lm = (ldt_info.flags >> 7) & 1;
6196 #endif
6197 
6198     if (contents == 3) {
6199         if (seg_not_present == 0)
6200             return -TARGET_EINVAL;
6201     }
6202 
6203     /* NOTE: same code as Linux kernel */
6204     /* Allow LDTs to be cleared by the user. */
6205     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6206         if ((contents == 0             &&
6207              read_exec_only == 1       &&
6208              seg_32bit == 0            &&
6209              limit_in_pages == 0       &&
6210              seg_not_present == 1      &&
6211              useable == 0 )) {
6212             entry_1 = 0;
6213             entry_2 = 0;
6214             goto install;
6215         }
6216     }
6217 
6218     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6219         (ldt_info.limit & 0x0ffff);
6220     entry_2 = (ldt_info.base_addr & 0xff000000) |
6221         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6222         (ldt_info.limit & 0xf0000) |
6223         ((read_exec_only ^ 1) << 9) |
6224         (contents << 10) |
6225         ((seg_not_present ^ 1) << 15) |
6226         (seg_32bit << 22) |
6227         (limit_in_pages << 23) |
6228         (useable << 20) |
6229         (lm << 21) |
6230         0x7000;
6231 
6232     /* Install the new entry ...  */
6233 install:
6234     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6235     lp[0] = tswap32(entry_1);
6236     lp[1] = tswap32(entry_2);
6237     return 0;
6238 }
6239 
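/*
 * get_thread_area: read the GDT TLS descriptor selected by entry_number and
 * convert it back into the modify_ldt-style base/limit/flags layout, i.e.
 * the inverse of do_set_thread_area() above.
 */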
6240 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6241 {
6242     struct target_modify_ldt_ldt_s *target_ldt_info;
6243     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6244     uint32_t base_addr, limit, flags;
6245     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6246     int seg_not_present, useable, lm;
6247     uint32_t *lp, entry_1, entry_2;
6248 
6249     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6250     if (!target_ldt_info)
6251         return -TARGET_EFAULT;
6252     idx = tswap32(target_ldt_info->entry_number);
6253     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6254         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6255         unlock_user_struct(target_ldt_info, ptr, 1);
6256         return -TARGET_EINVAL;
6257     }
6258     lp = (uint32_t *)(gdt_table + idx);
6259     entry_1 = tswap32(lp[0]);
6260     entry_2 = tswap32(lp[1]);
6261 
6262     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6263     contents = (entry_2 >> 10) & 3;
6264     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6265     seg_32bit = (entry_2 >> 22) & 1;
6266     limit_in_pages = (entry_2 >> 23) & 1;
6267     useable = (entry_2 >> 20) & 1;
6268 #ifdef TARGET_ABI32
6269     lm = 0;
6270 #else
6271     lm = (entry_2 >> 21) & 1;
6272 #endif
6273     flags = (seg_32bit << 0) | (contents << 1) |
6274         (read_exec_only << 3) | (limit_in_pages << 4) |
6275         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6276     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6277     base_addr = (entry_1 >> 16) |
6278         (entry_2 & 0xff000000) |
6279         ((entry_2 & 0xff) << 16);
6280     target_ldt_info->base_addr = tswapal(base_addr);
6281     target_ldt_info->limit = tswap32(limit);
6282     target_ldt_info->flags = tswap32(flags);
6283     unlock_user_struct(target_ldt_info, ptr, 1);
6284     return 0;
6285 }
6286 
6287 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6288 {
6289     return -TARGET_ENOSYS;
6290 }
6291 #else
6292 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6293 {
6294     abi_long ret = 0;
6295     abi_ulong val;
6296     int idx;
6297 
6298     switch(code) {
6299     case TARGET_ARCH_SET_GS:
6300     case TARGET_ARCH_SET_FS:
6301         if (code == TARGET_ARCH_SET_GS)
6302             idx = R_GS;
6303         else
6304             idx = R_FS;
6305         cpu_x86_load_seg(env, idx, 0);
6306         env->segs[idx].base = addr;
6307         break;
6308     case TARGET_ARCH_GET_GS:
6309     case TARGET_ARCH_GET_FS:
6310         if (code == TARGET_ARCH_GET_GS)
6311             idx = R_GS;
6312         else
6313             idx = R_FS;
6314         val = env->segs[idx].base;
6315         if (put_user(val, addr, abi_ulong))
6316             ret = -TARGET_EFAULT;
6317         break;
6318     default:
6319         ret = -TARGET_EINVAL;
6320         break;
6321     }
6322     return ret;
6323 }
6324 #endif /* defined(TARGET_ABI32) */
6325 #endif /* defined(TARGET_I386) */
6326 
6327 /*
6328  * These constants are generic.  Supply any that are missing from the host.
6329  */
6330 #ifndef PR_SET_NAME
6331 # define PR_SET_NAME    15
6332 # define PR_GET_NAME    16
6333 #endif
6334 #ifndef PR_SET_FP_MODE
6335 # define PR_SET_FP_MODE 45
6336 # define PR_GET_FP_MODE 46
6337 # define PR_FP_MODE_FR   (1 << 0)
6338 # define PR_FP_MODE_FRE  (1 << 1)
6339 #endif
6340 #ifndef PR_SVE_SET_VL
6341 # define PR_SVE_SET_VL  50
6342 # define PR_SVE_GET_VL  51
6343 # define PR_SVE_VL_LEN_MASK  0xffff
6344 # define PR_SVE_VL_INHERIT   (1 << 17)
6345 #endif
6346 #ifndef PR_PAC_RESET_KEYS
6347 # define PR_PAC_RESET_KEYS  54
6348 # define PR_PAC_APIAKEY   (1 << 0)
6349 # define PR_PAC_APIBKEY   (1 << 1)
6350 # define PR_PAC_APDAKEY   (1 << 2)
6351 # define PR_PAC_APDBKEY   (1 << 3)
6352 # define PR_PAC_APGAKEY   (1 << 4)
6353 #endif
6354 #ifndef PR_SET_TAGGED_ADDR_CTRL
6355 # define PR_SET_TAGGED_ADDR_CTRL 55
6356 # define PR_GET_TAGGED_ADDR_CTRL 56
6357 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6358 #endif
6359 #ifndef PR_MTE_TCF_SHIFT
6360 # define PR_MTE_TCF_SHIFT       1
6361 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6362 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6363 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6364 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6365 # define PR_MTE_TAG_SHIFT       3
6366 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6367 #endif
6368 #ifndef PR_SET_IO_FLUSHER
6369 # define PR_SET_IO_FLUSHER 57
6370 # define PR_GET_IO_FLUSHER 58
6371 #endif
6372 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6373 # define PR_SET_SYSCALL_USER_DISPATCH 59
6374 #endif
6375 #ifndef PR_SME_SET_VL
6376 # define PR_SME_SET_VL  63
6377 # define PR_SME_GET_VL  64
6378 # define PR_SME_VL_LEN_MASK  0xffff
6379 # define PR_SME_VL_INHERIT   (1 << 17)
6380 #endif
6381 
6382 #include "target_prctl.h"
6383 
6384 static abi_long do_prctl_inval0(CPUArchState *env)
6385 {
6386     return -TARGET_EINVAL;
6387 }
6388 
6389 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6390 {
6391     return -TARGET_EINVAL;
6392 }
6393 
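/*
 * Targets provide their own do_prctl_* helpers via target_prctl.h (included
 * above); any helper a target leaves undefined falls back to the
 * -TARGET_EINVAL stub with the matching argument count.
 */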
6394 #ifndef do_prctl_get_fp_mode
6395 #define do_prctl_get_fp_mode do_prctl_inval0
6396 #endif
6397 #ifndef do_prctl_set_fp_mode
6398 #define do_prctl_set_fp_mode do_prctl_inval1
6399 #endif
6400 #ifndef do_prctl_sve_get_vl
6401 #define do_prctl_sve_get_vl do_prctl_inval0
6402 #endif
6403 #ifndef do_prctl_sve_set_vl
6404 #define do_prctl_sve_set_vl do_prctl_inval1
6405 #endif
6406 #ifndef do_prctl_reset_keys
6407 #define do_prctl_reset_keys do_prctl_inval1
6408 #endif
6409 #ifndef do_prctl_set_tagged_addr_ctrl
6410 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6411 #endif
6412 #ifndef do_prctl_get_tagged_addr_ctrl
6413 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6414 #endif
6415 #ifndef do_prctl_get_unalign
6416 #define do_prctl_get_unalign do_prctl_inval1
6417 #endif
6418 #ifndef do_prctl_set_unalign
6419 #define do_prctl_set_unalign do_prctl_inval1
6420 #endif
6421 #ifndef do_prctl_sme_get_vl
6422 #define do_prctl_sme_get_vl do_prctl_inval0
6423 #endif
6424 #ifndef do_prctl_sme_set_vl
6425 #define do_prctl_sme_set_vl do_prctl_inval1
6426 #endif
6427 
6428 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6429                          abi_long arg3, abi_long arg4, abi_long arg5)
6430 {
6431     abi_long ret;
6432 
6433     switch (option) {
6434     case PR_GET_PDEATHSIG:
6435         {
6436             int deathsig;
6437             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6438                                   arg3, arg4, arg5));
6439             if (!is_error(ret) &&
6440                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6441                 return -TARGET_EFAULT;
6442             }
6443             return ret;
6444         }
6445     case PR_SET_PDEATHSIG:
6446         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6447                                arg3, arg4, arg5));
6448     case PR_GET_NAME:
6449         {
6450             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6451             if (!name) {
6452                 return -TARGET_EFAULT;
6453             }
6454             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6455                                   arg3, arg4, arg5));
6456             unlock_user(name, arg2, 16);
6457             return ret;
6458         }
6459     case PR_SET_NAME:
6460         {
6461             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6462             if (!name) {
6463                 return -TARGET_EFAULT;
6464             }
6465             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6466                                   arg3, arg4, arg5));
6467             unlock_user(name, arg2, 0);
6468             return ret;
6469         }
6470     case PR_GET_FP_MODE:
6471         return do_prctl_get_fp_mode(env);
6472     case PR_SET_FP_MODE:
6473         return do_prctl_set_fp_mode(env, arg2);
6474     case PR_SVE_GET_VL:
6475         return do_prctl_sve_get_vl(env);
6476     case PR_SVE_SET_VL:
6477         return do_prctl_sve_set_vl(env, arg2);
6478     case PR_SME_GET_VL:
6479         return do_prctl_sme_get_vl(env);
6480     case PR_SME_SET_VL:
6481         return do_prctl_sme_set_vl(env, arg2);
6482     case PR_PAC_RESET_KEYS:
6483         if (arg3 || arg4 || arg5) {
6484             return -TARGET_EINVAL;
6485         }
6486         return do_prctl_reset_keys(env, arg2);
6487     case PR_SET_TAGGED_ADDR_CTRL:
6488         if (arg3 || arg4 || arg5) {
6489             return -TARGET_EINVAL;
6490         }
6491         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6492     case PR_GET_TAGGED_ADDR_CTRL:
6493         if (arg2 || arg3 || arg4 || arg5) {
6494             return -TARGET_EINVAL;
6495         }
6496         return do_prctl_get_tagged_addr_ctrl(env);
6497 
6498     case PR_GET_UNALIGN:
6499         return do_prctl_get_unalign(env, arg2);
6500     case PR_SET_UNALIGN:
6501         return do_prctl_set_unalign(env, arg2);
6502 
6503     case PR_CAP_AMBIENT:
6504     case PR_CAPBSET_READ:
6505     case PR_CAPBSET_DROP:
6506     case PR_GET_DUMPABLE:
6507     case PR_SET_DUMPABLE:
6508     case PR_GET_KEEPCAPS:
6509     case PR_SET_KEEPCAPS:
6510     case PR_GET_SECUREBITS:
6511     case PR_SET_SECUREBITS:
6512     case PR_GET_TIMING:
6513     case PR_SET_TIMING:
6514     case PR_GET_TIMERSLACK:
6515     case PR_SET_TIMERSLACK:
6516     case PR_MCE_KILL:
6517     case PR_MCE_KILL_GET:
6518     case PR_GET_NO_NEW_PRIVS:
6519     case PR_SET_NO_NEW_PRIVS:
6520     case PR_GET_IO_FLUSHER:
6521     case PR_SET_IO_FLUSHER:
6522         /* Some prctl options have no pointer arguments and we can pass them on. */
6523         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6524 
6525     case PR_GET_CHILD_SUBREAPER:
6526     case PR_SET_CHILD_SUBREAPER:
6527     case PR_GET_SPECULATION_CTRL:
6528     case PR_SET_SPECULATION_CTRL:
6529     case PR_GET_TID_ADDRESS:
6530         /* TODO */
6531         return -TARGET_EINVAL;
6532 
6533     case PR_GET_FPEXC:
6534     case PR_SET_FPEXC:
6535         /* Was used for SPE on PowerPC. */
6536         return -TARGET_EINVAL;
6537 
6538     case PR_GET_ENDIAN:
6539     case PR_SET_ENDIAN:
6540     case PR_GET_FPEMU:
6541     case PR_SET_FPEMU:
6542     case PR_SET_MM:
6543     case PR_GET_SECCOMP:
6544     case PR_SET_SECCOMP:
6545     case PR_SET_SYSCALL_USER_DISPATCH:
6546     case PR_GET_THP_DISABLE:
6547     case PR_SET_THP_DISABLE:
6548     case PR_GET_TSC:
6549     case PR_SET_TSC:
6550         /* Disable these to prevent the target disabling things we need. */
6551         return -TARGET_EINVAL;
6552 
6553     default:
6554         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6555                       option);
6556         return -TARGET_EINVAL;
6557     }
6558 }
6559 
6560 #define NEW_STACK_SIZE 0x40000
6561 
6562 
6563 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6564 typedef struct {
6565     CPUArchState *env;
6566     pthread_mutex_t mutex;
6567     pthread_cond_t cond;
6568     pthread_t thread;
6569     uint32_t tid;
6570     abi_ulong child_tidptr;
6571     abi_ulong parent_tidptr;
6572     sigset_t sigmask;
6573 } new_thread_info;
6574 
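/*
 * Entry point for threads created via do_fork() with CLONE_VM: register the
 * thread with RCU/TCG, publish its TID where requested, signal the parent
 * via info->cond that it is ready, then wait on clone_lock (held by the
 * parent while it finishes initialization) before entering cpu_loop().
 */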
6575 static void *clone_func(void *arg)
6576 {
6577     new_thread_info *info = arg;
6578     CPUArchState *env;
6579     CPUState *cpu;
6580     TaskState *ts;
6581 
6582     rcu_register_thread();
6583     tcg_register_thread();
6584     env = info->env;
6585     cpu = env_cpu(env);
6586     thread_cpu = cpu;
6587     ts = (TaskState *)cpu->opaque;
6588     info->tid = sys_gettid();
6589     task_settid(ts);
6590     if (info->child_tidptr)
6591         put_user_u32(info->tid, info->child_tidptr);
6592     if (info->parent_tidptr)
6593         put_user_u32(info->tid, info->parent_tidptr);
6594     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6595     /* Enable signals.  */
6596     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6597     /* Signal to the parent that we're ready.  */
6598     pthread_mutex_lock(&info->mutex);
6599     pthread_cond_broadcast(&info->cond);
6600     pthread_mutex_unlock(&info->mutex);
6601     /* Wait until the parent has finished initializing the tls state.  */
6602     pthread_mutex_lock(&clone_lock);
6603     pthread_mutex_unlock(&clone_lock);
6604     cpu_loop(env);
6605     /* never exits */
6606     return NULL;
6607 }
6608 
6609 /* do_fork() must return host values and target errnos (unlike most
6610    do_*() functions). */
6611 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6612                    abi_ulong parent_tidptr, target_ulong newtls,
6613                    abi_ulong child_tidptr)
6614 {
6615     CPUState *cpu = env_cpu(env);
6616     int ret;
6617     TaskState *ts;
6618     CPUState *new_cpu;
6619     CPUArchState *new_env;
6620     sigset_t sigmask;
6621 
6622     flags &= ~CLONE_IGNORED_FLAGS;
6623 
6624     /* Emulate vfork() with fork() */
6625     if (flags & CLONE_VFORK)
6626         flags &= ~(CLONE_VFORK | CLONE_VM);
6627 
6628     if (flags & CLONE_VM) {
6629         TaskState *parent_ts = (TaskState *)cpu->opaque;
6630         new_thread_info info;
6631         pthread_attr_t attr;
6632 
6633         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6634             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6635             return -TARGET_EINVAL;
6636         }
6637 
6638         ts = g_new0(TaskState, 1);
6639         init_task_state(ts);
6640 
6641         /* Grab a mutex so that thread setup appears atomic.  */
6642         pthread_mutex_lock(&clone_lock);
6643 
6644         /*
6645          * If this is our first additional thread, we need to ensure we
6646          * generate code for parallel execution and flush old translations.
6647          * Do this now so that the copy gets CF_PARALLEL too.
6648          */
6649         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6650             cpu->tcg_cflags |= CF_PARALLEL;
6651             tb_flush(cpu);
6652         }
6653 
6654         /* we create a new CPU instance. */
6655         new_env = cpu_copy(env);
6656         /* Init regs that differ from the parent.  */
6657         cpu_clone_regs_child(new_env, newsp, flags);
6658         cpu_clone_regs_parent(env, flags);
6659         new_cpu = env_cpu(new_env);
6660         new_cpu->opaque = ts;
6661         ts->bprm = parent_ts->bprm;
6662         ts->info = parent_ts->info;
6663         ts->signal_mask = parent_ts->signal_mask;
6664 
6665         if (flags & CLONE_CHILD_CLEARTID) {
6666             ts->child_tidptr = child_tidptr;
6667         }
6668 
6669         if (flags & CLONE_SETTLS) {
6670             cpu_set_tls (new_env, newtls);
6671         }
6672 
6673         memset(&info, 0, sizeof(info));
6674         pthread_mutex_init(&info.mutex, NULL);
6675         pthread_mutex_lock(&info.mutex);
6676         pthread_cond_init(&info.cond, NULL);
6677         info.env = new_env;
6678         if (flags & CLONE_CHILD_SETTID) {
6679             info.child_tidptr = child_tidptr;
6680         }
6681         if (flags & CLONE_PARENT_SETTID) {
6682             info.parent_tidptr = parent_tidptr;
6683         }
6684 
6685         ret = pthread_attr_init(&attr);
6686         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6687         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6688         /* It is not safe to deliver signals until the child has finished
6689            initializing, so temporarily block all signals.  */
6690         sigfillset(&sigmask);
6691         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6692         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6693 
6694         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6695         /* TODO: Free new CPU state if thread creation failed.  */
6696 
6697         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6698         pthread_attr_destroy(&attr);
6699         if (ret == 0) {
6700             /* Wait for the child to initialize.  */
6701             pthread_cond_wait(&info.cond, &info.mutex);
6702             ret = info.tid;
6703         } else {
6704             ret = -1;
6705         }
6706         pthread_mutex_unlock(&info.mutex);
6707         pthread_cond_destroy(&info.cond);
6708         pthread_mutex_destroy(&info.mutex);
6709         pthread_mutex_unlock(&clone_lock);
6710     } else {
6711         /* If there is no CLONE_VM, we treat this as a plain fork. */
6712         if (flags & CLONE_INVALID_FORK_FLAGS) {
6713             return -TARGET_EINVAL;
6714         }
6715 
6716         /* We can't support custom termination signals */
6717         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6718             return -TARGET_EINVAL;
6719         }
6720 
6721         if (block_signals()) {
6722             return -QEMU_ERESTARTSYS;
6723         }
6724 
6725         fork_start();
6726         ret = fork();
6727         if (ret == 0) {
6728             /* Child Process.  */
6729             cpu_clone_regs_child(env, newsp, flags);
6730             fork_end(1);
6731             /* There is a race condition here.  The parent process could
6732                theoretically read the TID in the child process before the child
6733                tid is set.  This would require using either ptrace
6734                (not implemented) or having *_tidptr point at a shared memory
6735                mapping.  We can't repeat the spinlock hack used above because
6736                the child process gets its own copy of the lock.  */
6737             if (flags & CLONE_CHILD_SETTID)
6738                 put_user_u32(sys_gettid(), child_tidptr);
6739             if (flags & CLONE_PARENT_SETTID)
6740                 put_user_u32(sys_gettid(), parent_tidptr);
6741             ts = (TaskState *)cpu->opaque;
6742             if (flags & CLONE_SETTLS)
6743                 cpu_set_tls (env, newtls);
6744             if (flags & CLONE_CHILD_CLEARTID)
6745                 ts->child_tidptr = child_tidptr;
6746         } else {
6747             cpu_clone_regs_parent(env, flags);
6748             fork_end(0);
6749         }
6750     }
6751     return ret;
6752 }
6753 
6754 /* warning: doesn't handle Linux-specific flags... */
6755 static int target_to_host_fcntl_cmd(int cmd)
6756 {
6757     int ret;
6758 
6759     switch(cmd) {
6760     case TARGET_F_DUPFD:
6761     case TARGET_F_GETFD:
6762     case TARGET_F_SETFD:
6763     case TARGET_F_GETFL:
6764     case TARGET_F_SETFL:
6765     case TARGET_F_OFD_GETLK:
6766     case TARGET_F_OFD_SETLK:
6767     case TARGET_F_OFD_SETLKW:
6768         ret = cmd;
6769         break;
6770     case TARGET_F_GETLK:
6771         ret = F_GETLK64;
6772         break;
6773     case TARGET_F_SETLK:
6774         ret = F_SETLK64;
6775         break;
6776     case TARGET_F_SETLKW:
6777         ret = F_SETLKW64;
6778         break;
6779     case TARGET_F_GETOWN:
6780         ret = F_GETOWN;
6781         break;
6782     case TARGET_F_SETOWN:
6783         ret = F_SETOWN;
6784         break;
6785     case TARGET_F_GETSIG:
6786         ret = F_GETSIG;
6787         break;
6788     case TARGET_F_SETSIG:
6789         ret = F_SETSIG;
6790         break;
6791 #if TARGET_ABI_BITS == 32
6792     case TARGET_F_GETLK64:
6793         ret = F_GETLK64;
6794         break;
6795     case TARGET_F_SETLK64:
6796         ret = F_SETLK64;
6797         break;
6798     case TARGET_F_SETLKW64:
6799         ret = F_SETLKW64;
6800         break;
6801 #endif
6802     case TARGET_F_SETLEASE:
6803         ret = F_SETLEASE;
6804         break;
6805     case TARGET_F_GETLEASE:
6806         ret = F_GETLEASE;
6807         break;
6808 #ifdef F_DUPFD_CLOEXEC
6809     case TARGET_F_DUPFD_CLOEXEC:
6810         ret = F_DUPFD_CLOEXEC;
6811         break;
6812 #endif
6813     case TARGET_F_NOTIFY:
6814         ret = F_NOTIFY;
6815         break;
6816 #ifdef F_GETOWN_EX
6817     case TARGET_F_GETOWN_EX:
6818         ret = F_GETOWN_EX;
6819         break;
6820 #endif
6821 #ifdef F_SETOWN_EX
6822     case TARGET_F_SETOWN_EX:
6823         ret = F_SETOWN_EX;
6824         break;
6825 #endif
6826 #ifdef F_SETPIPE_SZ
6827     case TARGET_F_SETPIPE_SZ:
6828         ret = F_SETPIPE_SZ;
6829         break;
6830     case TARGET_F_GETPIPE_SZ:
6831         ret = F_GETPIPE_SZ;
6832         break;
6833 #endif
6834 #ifdef F_ADD_SEALS
6835     case TARGET_F_ADD_SEALS:
6836         ret = F_ADD_SEALS;
6837         break;
6838     case TARGET_F_GET_SEALS:
6839         ret = F_GET_SEALS;
6840         break;
6841 #endif
6842     default:
6843         ret = -TARGET_EINVAL;
6844         break;
6845     }
6846 
6847 #if defined(__powerpc64__)
6848     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6849      * are not supported by the kernel. The glibc fcntl wrapper actually
6850      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6851      * the syscall directly, adjust to what the kernel supports.
6852      */
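    /*
     * Illustrative effect of the adjustment below, using the constants named
     * in the comment above: F_GETLK64 (12) -> 5, F_SETLK64 (13) -> 6,
     * F_SETLKW64 (14) -> 7.
     */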
6853     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6854         ret -= F_GETLK64 - 5;
6855     }
6856 #endif
6857 
6858     return ret;
6859 }
6860 
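/*
 * FLOCK_TRANSTBL expands to a block of case labels; TRANSTBL_CONVERT is
 * redefined before each expansion below so the same table yields both the
 * target->host and the host->target conversion.
 */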
6861 #define FLOCK_TRANSTBL \
6862     switch (type) { \
6863     TRANSTBL_CONVERT(F_RDLCK); \
6864     TRANSTBL_CONVERT(F_WRLCK); \
6865     TRANSTBL_CONVERT(F_UNLCK); \
6866     }
6867 
6868 static int target_to_host_flock(int type)
6869 {
6870 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6871     FLOCK_TRANSTBL
6872 #undef  TRANSTBL_CONVERT
6873     return -TARGET_EINVAL;
6874 }
6875 
6876 static int host_to_target_flock(int type)
6877 {
6878 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6879     FLOCK_TRANSTBL
6880 #undef  TRANSTBL_CONVERT
6881     /* If we don't know how to convert the value coming from the host,
6882      * copy it to the target field as-is.
6883      */
6884     return type;
6885 }
6886 
6887 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6888                                             abi_ulong target_flock_addr)
6889 {
6890     struct target_flock *target_fl;
6891     int l_type;
6892 
6893     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6894         return -TARGET_EFAULT;
6895     }
6896 
6897     __get_user(l_type, &target_fl->l_type);
6898     l_type = target_to_host_flock(l_type);
6899     if (l_type < 0) {
6900         return l_type;
6901     }
6902     fl->l_type = l_type;
6903     __get_user(fl->l_whence, &target_fl->l_whence);
6904     __get_user(fl->l_start, &target_fl->l_start);
6905     __get_user(fl->l_len, &target_fl->l_len);
6906     __get_user(fl->l_pid, &target_fl->l_pid);
6907     unlock_user_struct(target_fl, target_flock_addr, 0);
6908     return 0;
6909 }
6910 
6911 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6912                                           const struct flock64 *fl)
6913 {
6914     struct target_flock *target_fl;
6915     short l_type;
6916 
6917     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6918         return -TARGET_EFAULT;
6919     }
6920 
6921     l_type = host_to_target_flock(fl->l_type);
6922     __put_user(l_type, &target_fl->l_type);
6923     __put_user(fl->l_whence, &target_fl->l_whence);
6924     __put_user(fl->l_start, &target_fl->l_start);
6925     __put_user(fl->l_len, &target_fl->l_len);
6926     __put_user(fl->l_pid, &target_fl->l_pid);
6927     unlock_user_struct(target_fl, target_flock_addr, 1);
6928     return 0;
6929 }
6930 
6931 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6932 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6933 
6934 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6935 struct target_oabi_flock64 {
6936     abi_short l_type;
6937     abi_short l_whence;
6938     abi_llong l_start;
6939     abi_llong l_len;
6940     abi_int   l_pid;
6941 } QEMU_PACKED;
6942 
6943 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6944                                                    abi_ulong target_flock_addr)
6945 {
6946     struct target_oabi_flock64 *target_fl;
6947     int l_type;
6948 
6949     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6950         return -TARGET_EFAULT;
6951     }
6952 
6953     __get_user(l_type, &target_fl->l_type);
6954     l_type = target_to_host_flock(l_type);
6955     if (l_type < 0) {
6956         return l_type;
6957     }
6958     fl->l_type = l_type;
6959     __get_user(fl->l_whence, &target_fl->l_whence);
6960     __get_user(fl->l_start, &target_fl->l_start);
6961     __get_user(fl->l_len, &target_fl->l_len);
6962     __get_user(fl->l_pid, &target_fl->l_pid);
6963     unlock_user_struct(target_fl, target_flock_addr, 0);
6964     return 0;
6965 }
6966 
6967 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6968                                                  const struct flock64 *fl)
6969 {
6970     struct target_oabi_flock64 *target_fl;
6971     short l_type;
6972 
6973     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6974         return -TARGET_EFAULT;
6975     }
6976 
6977     l_type = host_to_target_flock(fl->l_type);
6978     __put_user(l_type, &target_fl->l_type);
6979     __put_user(fl->l_whence, &target_fl->l_whence);
6980     __put_user(fl->l_start, &target_fl->l_start);
6981     __put_user(fl->l_len, &target_fl->l_len);
6982     __put_user(fl->l_pid, &target_fl->l_pid);
6983     unlock_user_struct(target_fl, target_flock_addr, 1);
6984     return 0;
6985 }
6986 #endif
6987 
6988 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6989                                               abi_ulong target_flock_addr)
6990 {
6991     struct target_flock64 *target_fl;
6992     int l_type;
6993 
6994     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6995         return -TARGET_EFAULT;
6996     }
6997 
6998     __get_user(l_type, &target_fl->l_type);
6999     l_type = target_to_host_flock(l_type);
7000     if (l_type < 0) {
7001         return l_type;
7002     }
7003     fl->l_type = l_type;
7004     __get_user(fl->l_whence, &target_fl->l_whence);
7005     __get_user(fl->l_start, &target_fl->l_start);
7006     __get_user(fl->l_len, &target_fl->l_len);
7007     __get_user(fl->l_pid, &target_fl->l_pid);
7008     unlock_user_struct(target_fl, target_flock_addr, 0);
7009     return 0;
7010 }
7011 
7012 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7013                                             const struct flock64 *fl)
7014 {
7015     struct target_flock64 *target_fl;
7016     short l_type;
7017 
7018     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7019         return -TARGET_EFAULT;
7020     }
7021 
7022     l_type = host_to_target_flock(fl->l_type);
7023     __put_user(l_type, &target_fl->l_type);
7024     __put_user(fl->l_whence, &target_fl->l_whence);
7025     __put_user(fl->l_start, &target_fl->l_start);
7026     __put_user(fl->l_len, &target_fl->l_len);
7027     __put_user(fl->l_pid, &target_fl->l_pid);
7028     unlock_user_struct(target_fl, target_flock_addr, 1);
7029     return 0;
7030 }
7031 
7032 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7033 {
7034     struct flock64 fl64;
7035 #ifdef F_GETOWN_EX
7036     struct f_owner_ex fox;
7037     struct target_f_owner_ex *target_fox;
7038 #endif
7039     abi_long ret;
7040     int host_cmd = target_to_host_fcntl_cmd(cmd);
7041 
7042     if (host_cmd == -TARGET_EINVAL)
7043         return host_cmd;
7044 
7045     switch(cmd) {
7046     case TARGET_F_GETLK:
7047         ret = copy_from_user_flock(&fl64, arg);
7048         if (ret) {
7049             return ret;
7050         }
7051         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7052         if (ret == 0) {
7053             ret = copy_to_user_flock(arg, &fl64);
7054         }
7055         break;
7056 
7057     case TARGET_F_SETLK:
7058     case TARGET_F_SETLKW:
7059         ret = copy_from_user_flock(&fl64, arg);
7060         if (ret) {
7061             return ret;
7062         }
7063         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7064         break;
7065 
7066     case TARGET_F_GETLK64:
7067     case TARGET_F_OFD_GETLK:
7068         ret = copy_from_user_flock64(&fl64, arg);
7069         if (ret) {
7070             return ret;
7071         }
7072         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7073         if (ret == 0) {
7074             ret = copy_to_user_flock64(arg, &fl64);
7075         }
7076         break;
7077     case TARGET_F_SETLK64:
7078     case TARGET_F_SETLKW64:
7079     case TARGET_F_OFD_SETLK:
7080     case TARGET_F_OFD_SETLKW:
7081         ret = copy_from_user_flock64(&fl64, arg);
7082         if (ret) {
7083             return ret;
7084         }
7085         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7086         break;
7087 
7088     case TARGET_F_GETFL:
7089         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7090         if (ret >= 0) {
7091             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7092         }
7093         break;
7094 
7095     case TARGET_F_SETFL:
7096         ret = get_errno(safe_fcntl(fd, host_cmd,
7097                                    target_to_host_bitmask(arg,
7098                                                           fcntl_flags_tbl)));
7099         break;
7100 
7101 #ifdef F_GETOWN_EX
7102     case TARGET_F_GETOWN_EX:
7103         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7104         if (ret >= 0) {
7105             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7106                 return -TARGET_EFAULT;
7107             target_fox->type = tswap32(fox.type);
7108             target_fox->pid = tswap32(fox.pid);
7109             unlock_user_struct(target_fox, arg, 1);
7110         }
7111         break;
7112 #endif
7113 
7114 #ifdef F_SETOWN_EX
7115     case TARGET_F_SETOWN_EX:
7116         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7117             return -TARGET_EFAULT;
7118         fox.type = tswap32(target_fox->type);
7119         fox.pid = tswap32(target_fox->pid);
7120         unlock_user_struct(target_fox, arg, 0);
7121         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7122         break;
7123 #endif
7124 
7125     case TARGET_F_SETSIG:
7126         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7127         break;
7128 
7129     case TARGET_F_GETSIG:
7130         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7131         break;
7132 
7133     case TARGET_F_SETOWN:
7134     case TARGET_F_GETOWN:
7135     case TARGET_F_SETLEASE:
7136     case TARGET_F_GETLEASE:
7137     case TARGET_F_SETPIPE_SZ:
7138     case TARGET_F_GETPIPE_SZ:
7139     case TARGET_F_ADD_SEALS:
7140     case TARGET_F_GET_SEALS:
7141         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7142         break;
7143 
7144     default:
7145         ret = get_errno(safe_fcntl(fd, cmd, arg));
7146         break;
7147     }
7148     return ret;
7149 }
7150 
7151 #ifdef USE_UID16
7152 
7153 static inline int high2lowuid(int uid)
7154 {
7155     if (uid > 65535)
7156         return 65534;
7157     else
7158         return uid;
7159 }
7160 
7161 static inline int high2lowgid(int gid)
7162 {
7163     if (gid > 65535)
7164         return 65534;
7165     else
7166         return gid;
7167 }
7168 
7169 static inline int low2highuid(int uid)
7170 {
7171     if ((int16_t)uid == -1)
7172         return -1;
7173     else
7174         return uid;
7175 }
7176 
7177 static inline int low2highgid(int gid)
7178 {
7179     if ((int16_t)gid == -1)
7180         return -1;
7181     else
7182         return gid;
7183 }
7184 static inline int tswapid(int id)
7185 {
7186     return tswap16(id);
7187 }
7188 
7189 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7190 
7191 #else /* !USE_UID16 */
7192 static inline int high2lowuid(int uid)
7193 {
7194     return uid;
7195 }
7196 static inline int high2lowgid(int gid)
7197 {
7198     return gid;
7199 }
7200 static inline int low2highuid(int uid)
7201 {
7202     return uid;
7203 }
7204 static inline int low2highgid(int gid)
7205 {
7206     return gid;
7207 }
7208 static inline int tswapid(int id)
7209 {
7210     return tswap32(id);
7211 }
7212 
7213 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7214 
7215 #endif /* USE_UID16 */
7216 
7217 /* We must do direct syscalls for setting UID/GID, because we want to
7218  * implement the Linux system call semantics of "change only for this thread",
7219  * not the libc/POSIX semantics of "change for all threads in process".
7220  * (See http://ewontfix.com/17/ for more details.)
7221  * We use the 32-bit version of the syscalls if present; if it is not
7222  * then either the host architecture supports 32-bit UIDs natively with
7223  * the standard syscall, or the 16-bit UID is the best we can do.
7224  */
7225 #ifdef __NR_setuid32
7226 #define __NR_sys_setuid __NR_setuid32
7227 #else
7228 #define __NR_sys_setuid __NR_setuid
7229 #endif
7230 #ifdef __NR_setgid32
7231 #define __NR_sys_setgid __NR_setgid32
7232 #else
7233 #define __NR_sys_setgid __NR_setgid
7234 #endif
7235 #ifdef __NR_setresuid32
7236 #define __NR_sys_setresuid __NR_setresuid32
7237 #else
7238 #define __NR_sys_setresuid __NR_setresuid
7239 #endif
7240 #ifdef __NR_setresgid32
7241 #define __NR_sys_setresgid __NR_setresgid32
7242 #else
7243 #define __NR_sys_setresgid __NR_setresgid
7244 #endif
7245 
7246 _syscall1(int, sys_setuid, uid_t, uid)
7247 _syscall1(int, sys_setgid, gid_t, gid)
7248 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7249 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
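
/*
 * A sketch of the expected call pattern (the real dispatch happens later in
 * this file and may differ in detail): e.g. the TARGET_NR_setuid handler
 * would call get_errno(sys_setuid(low2highuid(arg1))), so only the calling
 * thread's credentials change.
 */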
7250 
7251 void syscall_init(void)
7252 {
7253     IOCTLEntry *ie;
7254     const argtype *arg_type;
7255     int size;
7256 
7257     thunk_init(STRUCT_MAX);
7258 
7259 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7260 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7261 #include "syscall_types.h"
7262 #undef STRUCT
7263 #undef STRUCT_SPECIAL
7264 
7265     /* We patch the ioctl size if necessary. We rely on the fact that
7266        no ioctl has all the bits set to '1' in the size field. */
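    /*
     * Illustrative (hypothetical) example: an ioctl whose definition leaves
     * the size field as all ones and whose argument is a pointer to a
     * thunk-described struct has that placeholder replaced by
     * thunk_type_size() of the struct, so the rewritten target_cmd carries
     * the real size.
     */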
7267     ie = ioctl_entries;
7268     while (ie->target_cmd != 0) {
7269         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7270             TARGET_IOC_SIZEMASK) {
7271             arg_type = ie->arg_type;
7272             if (arg_type[0] != TYPE_PTR) {
7273                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7274                         ie->target_cmd);
7275                 exit(1);
7276             }
7277             arg_type++;
7278             size = thunk_type_size(arg_type, 0);
7279             ie->target_cmd = (ie->target_cmd &
7280                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7281                 (size << TARGET_IOC_SIZESHIFT);
7282         }
7283 
7284         /* automatic consistency check if same arch */
7285 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7286     (defined(__x86_64__) && defined(TARGET_X86_64))
7287         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7288             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7289                     ie->name, ie->target_cmd, ie->host_cmd);
7290         }
7291 #endif
7292         ie++;
7293     }
7294 }
7295 
7296 #ifdef TARGET_NR_truncate64
7297 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7298                                          abi_long arg2,
7299                                          abi_long arg3,
7300                                          abi_long arg4)
7301 {
7302     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7303         arg2 = arg3;
7304         arg3 = arg4;
7305     }
7306     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7307 }
7308 #endif
7309 
7310 #ifdef TARGET_NR_ftruncate64
7311 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7312                                           abi_long arg2,
7313                                           abi_long arg3,
7314                                           abi_long arg4)
7315 {
7316     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7317         arg2 = arg3;
7318         arg3 = arg4;
7319     }
7320     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7321 }
7322 #endif
7323 
7324 #if defined(TARGET_NR_timer_settime) || \
7325     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7326 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7327                                                  abi_ulong target_addr)
7328 {
7329     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7330                                 offsetof(struct target_itimerspec,
7331                                          it_interval)) ||
7332         target_to_host_timespec(&host_its->it_value, target_addr +
7333                                 offsetof(struct target_itimerspec,
7334                                          it_value))) {
7335         return -TARGET_EFAULT;
7336     }
7337 
7338     return 0;
7339 }
7340 #endif
7341 
7342 #if defined(TARGET_NR_timer_settime64) || \
7343     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7344 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7345                                                    abi_ulong target_addr)
7346 {
7347     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7348                                   offsetof(struct target__kernel_itimerspec,
7349                                            it_interval)) ||
7350         target_to_host_timespec64(&host_its->it_value, target_addr +
7351                                   offsetof(struct target__kernel_itimerspec,
7352                                            it_value))) {
7353         return -TARGET_EFAULT;
7354     }
7355 
7356     return 0;
7357 }
7358 #endif
7359 
7360 #if ((defined(TARGET_NR_timerfd_gettime) || \
7361       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7362       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7363 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7364                                                  struct itimerspec *host_its)
7365 {
7366     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7367                                                        it_interval),
7368                                 &host_its->it_interval) ||
7369         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7370                                                        it_value),
7371                                 &host_its->it_value)) {
7372         return -TARGET_EFAULT;
7373     }
7374     return 0;
7375 }
7376 #endif
7377 
7378 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7379       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7380       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7381 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7382                                                    struct itimerspec *host_its)
7383 {
7384     if (host_to_target_timespec64(target_addr +
7385                                   offsetof(struct target__kernel_itimerspec,
7386                                            it_interval),
7387                                   &host_its->it_interval) ||
7388         host_to_target_timespec64(target_addr +
7389                                   offsetof(struct target__kernel_itimerspec,
7390                                            it_value),
7391                                   &host_its->it_value)) {
7392         return -TARGET_EFAULT;
7393     }
7394     return 0;
7395 }
7396 #endif
7397 
7398 #if defined(TARGET_NR_adjtimex) || \
7399     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7400 static inline abi_long target_to_host_timex(struct timex *host_tx,
7401                                             abi_long target_addr)
7402 {
7403     struct target_timex *target_tx;
7404 
7405     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7406         return -TARGET_EFAULT;
7407     }
7408 
7409     __get_user(host_tx->modes, &target_tx->modes);
7410     __get_user(host_tx->offset, &target_tx->offset);
7411     __get_user(host_tx->freq, &target_tx->freq);
7412     __get_user(host_tx->maxerror, &target_tx->maxerror);
7413     __get_user(host_tx->esterror, &target_tx->esterror);
7414     __get_user(host_tx->status, &target_tx->status);
7415     __get_user(host_tx->constant, &target_tx->constant);
7416     __get_user(host_tx->precision, &target_tx->precision);
7417     __get_user(host_tx->tolerance, &target_tx->tolerance);
7418     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7419     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7420     __get_user(host_tx->tick, &target_tx->tick);
7421     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7422     __get_user(host_tx->jitter, &target_tx->jitter);
7423     __get_user(host_tx->shift, &target_tx->shift);
7424     __get_user(host_tx->stabil, &target_tx->stabil);
7425     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7426     __get_user(host_tx->calcnt, &target_tx->calcnt);
7427     __get_user(host_tx->errcnt, &target_tx->errcnt);
7428     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7429     __get_user(host_tx->tai, &target_tx->tai);
7430 
7431     unlock_user_struct(target_tx, target_addr, 0);
7432     return 0;
7433 }
7434 
7435 static inline abi_long host_to_target_timex(abi_long target_addr,
7436                                             struct timex *host_tx)
7437 {
7438     struct target_timex *target_tx;
7439 
7440     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7441         return -TARGET_EFAULT;
7442     }
7443 
7444     __put_user(host_tx->modes, &target_tx->modes);
7445     __put_user(host_tx->offset, &target_tx->offset);
7446     __put_user(host_tx->freq, &target_tx->freq);
7447     __put_user(host_tx->maxerror, &target_tx->maxerror);
7448     __put_user(host_tx->esterror, &target_tx->esterror);
7449     __put_user(host_tx->status, &target_tx->status);
7450     __put_user(host_tx->constant, &target_tx->constant);
7451     __put_user(host_tx->precision, &target_tx->precision);
7452     __put_user(host_tx->tolerance, &target_tx->tolerance);
7453     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7454     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7455     __put_user(host_tx->tick, &target_tx->tick);
7456     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7457     __put_user(host_tx->jitter, &target_tx->jitter);
7458     __put_user(host_tx->shift, &target_tx->shift);
7459     __put_user(host_tx->stabil, &target_tx->stabil);
7460     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7461     __put_user(host_tx->calcnt, &target_tx->calcnt);
7462     __put_user(host_tx->errcnt, &target_tx->errcnt);
7463     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7464     __put_user(host_tx->tai, &target_tx->tai);
7465 
7466     unlock_user_struct(target_tx, target_addr, 1);
7467     return 0;
7468 }
7469 #endif
7470 
7471 
7472 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7473 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7474                                               abi_long target_addr)
7475 {
7476     struct target__kernel_timex *target_tx;
7477 
7478     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7479                                  offsetof(struct target__kernel_timex,
7480                                           time))) {
7481         return -TARGET_EFAULT;
7482     }
7483 
7484     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7485         return -TARGET_EFAULT;
7486     }
7487 
7488     __get_user(host_tx->modes, &target_tx->modes);
7489     __get_user(host_tx->offset, &target_tx->offset);
7490     __get_user(host_tx->freq, &target_tx->freq);
7491     __get_user(host_tx->maxerror, &target_tx->maxerror);
7492     __get_user(host_tx->esterror, &target_tx->esterror);
7493     __get_user(host_tx->status, &target_tx->status);
7494     __get_user(host_tx->constant, &target_tx->constant);
7495     __get_user(host_tx->precision, &target_tx->precision);
7496     __get_user(host_tx->tolerance, &target_tx->tolerance);
7497     __get_user(host_tx->tick, &target_tx->tick);
7498     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7499     __get_user(host_tx->jitter, &target_tx->jitter);
7500     __get_user(host_tx->shift, &target_tx->shift);
7501     __get_user(host_tx->stabil, &target_tx->stabil);
7502     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7503     __get_user(host_tx->calcnt, &target_tx->calcnt);
7504     __get_user(host_tx->errcnt, &target_tx->errcnt);
7505     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7506     __get_user(host_tx->tai, &target_tx->tai);
7507 
7508     unlock_user_struct(target_tx, target_addr, 0);
7509     return 0;
7510 }
7511 
7512 static inline abi_long host_to_target_timex64(abi_long target_addr,
7513                                               struct timex *host_tx)
7514 {
7515     struct target__kernel_timex *target_tx;
7516 
7517     if (copy_to_user_timeval64(target_addr +
7518                                offsetof(struct target__kernel_timex, time),
7519                                &host_tx->time)) {
7520         return -TARGET_EFAULT;
7521     }
7522 
7523     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7524         return -TARGET_EFAULT;
7525     }
7526 
7527     __put_user(host_tx->modes, &target_tx->modes);
7528     __put_user(host_tx->offset, &target_tx->offset);
7529     __put_user(host_tx->freq, &target_tx->freq);
7530     __put_user(host_tx->maxerror, &target_tx->maxerror);
7531     __put_user(host_tx->esterror, &target_tx->esterror);
7532     __put_user(host_tx->status, &target_tx->status);
7533     __put_user(host_tx->constant, &target_tx->constant);
7534     __put_user(host_tx->precision, &target_tx->precision);
7535     __put_user(host_tx->tolerance, &target_tx->tolerance);
7536     __put_user(host_tx->tick, &target_tx->tick);
7537     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7538     __put_user(host_tx->jitter, &target_tx->jitter);
7539     __put_user(host_tx->shift, &target_tx->shift);
7540     __put_user(host_tx->stabil, &target_tx->stabil);
7541     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7542     __put_user(host_tx->calcnt, &target_tx->calcnt);
7543     __put_user(host_tx->errcnt, &target_tx->errcnt);
7544     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7545     __put_user(host_tx->tai, &target_tx->tai);
7546 
7547     unlock_user_struct(target_tx, target_addr, 1);
7548     return 0;
7549 }
7550 #endif
7551 
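/*
 * Hosts whose libc headers do not expose sigev_notify_thread_id get the
 * internal union member as a fallback, so target_to_host_sigevent() below
 * can fill it in either way.
 */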
7552 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7553 #define sigev_notify_thread_id _sigev_un._tid
7554 #endif
7555 
7556 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7557                                                abi_ulong target_addr)
7558 {
7559     struct target_sigevent *target_sevp;
7560 
7561     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7562         return -TARGET_EFAULT;
7563     }
7564 
7565     /* This union is awkward on 64 bit systems because it has a 32 bit
7566      * integer and a pointer in it; we follow the conversion approach
7567      * used for handling sigval types in signal.c so the guest should get
7568      * the correct value back even if we did a 64 bit byteswap and it's
7569      * using the 32 bit integer.
7570      */
7571     host_sevp->sigev_value.sival_ptr =
7572         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7573     host_sevp->sigev_signo =
7574         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7575     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7576     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7577 
7578     unlock_user_struct(target_sevp, target_addr, 1);
7579     return 0;
7580 }
7581 
7582 #if defined(TARGET_NR_mlockall)
7583 static inline int target_to_host_mlockall_arg(int arg)
7584 {
7585     int result = 0;
7586 
7587     if (arg & TARGET_MCL_CURRENT) {
7588         result |= MCL_CURRENT;
7589     }
7590     if (arg & TARGET_MCL_FUTURE) {
7591         result |= MCL_FUTURE;
7592     }
7593 #ifdef MCL_ONFAULT
7594     if (arg & TARGET_MCL_ONFAULT) {
7595         result |= MCL_ONFAULT;
7596     }
7597 #endif
7598 
7599     return result;
7600 }
7601 #endif
7602 
7603 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7604      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7605      defined(TARGET_NR_newfstatat))
7606 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7607                                              abi_ulong target_addr,
7608                                              struct stat *host_st)
7609 {
7610 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7611     if (cpu_env->eabi) {
7612         struct target_eabi_stat64 *target_st;
7613 
7614         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7615             return -TARGET_EFAULT;
7616         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7617         __put_user(host_st->st_dev, &target_st->st_dev);
7618         __put_user(host_st->st_ino, &target_st->st_ino);
7619 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7620         __put_user(host_st->st_ino, &target_st->__st_ino);
7621 #endif
7622         __put_user(host_st->st_mode, &target_st->st_mode);
7623         __put_user(host_st->st_nlink, &target_st->st_nlink);
7624         __put_user(host_st->st_uid, &target_st->st_uid);
7625         __put_user(host_st->st_gid, &target_st->st_gid);
7626         __put_user(host_st->st_rdev, &target_st->st_rdev);
7627         __put_user(host_st->st_size, &target_st->st_size);
7628         __put_user(host_st->st_blksize, &target_st->st_blksize);
7629         __put_user(host_st->st_blocks, &target_st->st_blocks);
7630         __put_user(host_st->st_atime, &target_st->target_st_atime);
7631         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7632         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7633 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7634         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7635         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7636         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7637 #endif
7638         unlock_user_struct(target_st, target_addr, 1);
7639     } else
7640 #endif
7641     {
7642 #if defined(TARGET_HAS_STRUCT_STAT64)
7643         struct target_stat64 *target_st;
7644 #else
7645         struct target_stat *target_st;
7646 #endif
7647 
7648         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7649             return -TARGET_EFAULT;
7650         memset(target_st, 0, sizeof(*target_st));
7651         __put_user(host_st->st_dev, &target_st->st_dev);
7652         __put_user(host_st->st_ino, &target_st->st_ino);
7653 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7654         __put_user(host_st->st_ino, &target_st->__st_ino);
7655 #endif
7656         __put_user(host_st->st_mode, &target_st->st_mode);
7657         __put_user(host_st->st_nlink, &target_st->st_nlink);
7658         __put_user(host_st->st_uid, &target_st->st_uid);
7659         __put_user(host_st->st_gid, &target_st->st_gid);
7660         __put_user(host_st->st_rdev, &target_st->st_rdev);
7661         /* XXX: better use of kernel struct */
7662         __put_user(host_st->st_size, &target_st->st_size);
7663         __put_user(host_st->st_blksize, &target_st->st_blksize);
7664         __put_user(host_st->st_blocks, &target_st->st_blocks);
7665         __put_user(host_st->st_atime, &target_st->target_st_atime);
7666         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7667         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7668 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7669         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7670         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7671         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7672 #endif
7673         unlock_user_struct(target_st, target_addr, 1);
7674     }
7675 
7676     return 0;
7677 }
7678 #endif
7679 
7680 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7681 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7682                                             abi_ulong target_addr)
7683 {
7684     struct target_statx *target_stx;
7685 
7686     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7687         return -TARGET_EFAULT;
7688     }
7689     memset(target_stx, 0, sizeof(*target_stx));
7690 
7691     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7692     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7693     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7694     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7695     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7696     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7697     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7698     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7699     __put_user(host_stx->stx_size, &target_stx->stx_size);
7700     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7701     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7702     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7703     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7704     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7705     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7706     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7707     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7708     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7709     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7710     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7711     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7712     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7713     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7714 
7715     unlock_user_struct(target_stx, target_addr, 1);
7716 
7717     return 0;
7718 }
7719 #endif
7720 
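/*
 * Pick the host futex syscall that matches the host's time_t width: a
 * 64-bit host always has a 64-bit timespec and can use __NR_futex directly,
 * while a 32-bit host prefers __NR_futex_time64 when its timespec carries a
 * 64-bit tv_sec and otherwise falls back to the legacy __NR_futex.
 */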
7721 static int do_sys_futex(int *uaddr, int op, int val,
7722                          const struct timespec *timeout, int *uaddr2,
7723                          int val3)
7724 {
7725 #if HOST_LONG_BITS == 64
7726 #if defined(__NR_futex)
7727     /* time_t is always 64-bit here; the host defines no _time64 variant. */
7728     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7729 
7730 #endif
7731 #else /* HOST_LONG_BITS == 64 */
7732 #if defined(__NR_futex_time64)
7733     if (sizeof(timeout->tv_sec) == 8) {
7734         /* _time64 function on 32-bit arch */
7735         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7736     }
7737 #endif
7738 #if defined(__NR_futex)
7739     /* old function on 32bit arch */
7740     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7741 #endif
7742 #endif /* HOST_LONG_BITS == 64 */
7743     g_assert_not_reached();
7744 }
7745 
7746 static int do_safe_futex(int *uaddr, int op, int val,
7747                          const struct timespec *timeout, int *uaddr2,
7748                          int val3)
7749 {
7750 #if HOST_LONG_BITS == 64
7751 #if defined(__NR_futex)
7752     /* time_t is always 64-bit here; the host defines no _time64 variant. */
7753     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7754 #endif
7755 #else /* HOST_LONG_BITS == 64 */
7756 #if defined(__NR_futex_time64)
7757     if (sizeof(timeout->tv_sec) == 8) {
7758         /* _time64 function on 32-bit arch */
7759         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7760                                            val3));
7761     }
7762 #endif
7763 #if defined(__NR_futex)
7764     /* old function on 32-bit arch */
7765     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7766 #endif
7767 #endif /* HOST_LONG_BITS == 64 */
7768     return -TARGET_ENOSYS;
7769 }
7770 
7771 /* ??? Using host futex calls even when target atomic operations
7772    are not really atomic probably breaks things.  However, implementing
7773    futexes locally would make it hard to share futexes between multiple
7774    processes, and such local futexes would probably be useless anyway
7775    because guest atomic operations would not work either.  */
7776 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7777 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7778                     int op, int val, target_ulong timeout,
7779                     target_ulong uaddr2, int val3)
7780 {
7781     struct timespec ts, *pts = NULL;
7782     void *haddr2 = NULL;
7783     int base_op;
7784 
7785     /* We assume FUTEX_* constants are the same on both host and target. */
7786 #ifdef FUTEX_CMD_MASK
7787     base_op = op & FUTEX_CMD_MASK;
7788 #else
7789     base_op = op;
7790 #endif
7791     switch (base_op) {
7792     case FUTEX_WAIT:
7793     case FUTEX_WAIT_BITSET:
7794         val = tswap32(val);
7795         break;
7796     case FUTEX_WAIT_REQUEUE_PI:
7797         val = tswap32(val);
7798         haddr2 = g2h(cpu, uaddr2);
7799         break;
7800     case FUTEX_LOCK_PI:
7801     case FUTEX_LOCK_PI2:
7802         break;
7803     case FUTEX_WAKE:
7804     case FUTEX_WAKE_BITSET:
7805     case FUTEX_TRYLOCK_PI:
7806     case FUTEX_UNLOCK_PI:
7807         timeout = 0;
7808         break;
7809     case FUTEX_FD:
7810         val = target_to_host_signal(val);
7811         timeout = 0;
7812         break;
7813     case FUTEX_CMP_REQUEUE:
7814     case FUTEX_CMP_REQUEUE_PI:
7815         val3 = tswap32(val3);
7816         /* fall through */
7817     case FUTEX_REQUEUE:
7818     case FUTEX_WAKE_OP:
7819         /*
7820          * For these, the 4th argument is not TIMEOUT, but VAL2.
7821          * But the prototype of do_safe_futex takes a pointer, so
7822          * insert casts to satisfy the compiler.  We do not need
7823          * to tswap VAL2 since it's not compared to guest memory.
7824          */
7825         pts = (struct timespec *)(uintptr_t)timeout;
7826         timeout = 0;
7827         haddr2 = g2h(cpu, uaddr2);
7828         break;
7829     default:
7830         return -TARGET_ENOSYS;
7831     }
7832     if (timeout) {
7833         pts = &ts;
7834         if (time64
7835             ? target_to_host_timespec64(pts, timeout)
7836             : target_to_host_timespec(pts, timeout)) {
7837             return -TARGET_EFAULT;
7838         }
7839     }
7840     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7841 }
7842 #endif
7843 
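/*
 * name_to_handle_at() emulation: the opaque file_handle is bounced through
 * a host-side buffer; only handle_bytes and handle_type are byte-swapped
 * for the guest, the handle payload is copied verbatim, and the mount id is
 * written back to the guest as a 32-bit value.
 */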
7844 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7845 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7846                                      abi_long handle, abi_long mount_id,
7847                                      abi_long flags)
7848 {
7849     struct file_handle *target_fh;
7850     struct file_handle *fh;
7851     int mid = 0;
7852     abi_long ret;
7853     char *name;
7854     unsigned int size, total_size;
7855 
7856     if (get_user_s32(size, handle)) {
7857         return -TARGET_EFAULT;
7858     }
7859 
7860     name = lock_user_string(pathname);
7861     if (!name) {
7862         return -TARGET_EFAULT;
7863     }
7864 
7865     total_size = sizeof(struct file_handle) + size;
7866     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7867     if (!target_fh) {
7868         unlock_user(name, pathname, 0);
7869         return -TARGET_EFAULT;
7870     }
7871 
7872     fh = g_malloc0(total_size);
7873     fh->handle_bytes = size;
7874 
7875     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7876     unlock_user(name, pathname, 0);
7877 
7878     /* man name_to_handle_at(2):
7879      * Other than the use of the handle_bytes field, the caller should treat
7880      * the file_handle structure as an opaque data type
7881      */
7882 
7883     memcpy(target_fh, fh, total_size);
7884     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7885     target_fh->handle_type = tswap32(fh->handle_type);
7886     g_free(fh);
7887     unlock_user(target_fh, handle, total_size);
7888 
7889     if (put_user_s32(mid, mount_id)) {
7890         return -TARGET_EFAULT;
7891     }
7892 
7893     return ret;
7894 
7895 }
7896 #endif
7897 
7898 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7899 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7900                                      abi_long flags)
7901 {
7902     struct file_handle *target_fh;
7903     struct file_handle *fh;
7904     unsigned int size, total_size;
7905     abi_long ret;
7906 
7907     if (get_user_s32(size, handle)) {
7908         return -TARGET_EFAULT;
7909     }
7910 
7911     total_size = sizeof(struct file_handle) + size;
7912     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7913     if (!target_fh) {
7914         return -TARGET_EFAULT;
7915     }
7916 
7917     fh = g_memdup(target_fh, total_size);
7918     fh->handle_bytes = size;
7919     fh->handle_type = tswap32(target_fh->handle_type);
7920 
7921     ret = get_errno(open_by_handle_at(mount_fd, fh,
7922                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7923 
7924     g_free(fh);
7925 
7926     unlock_user(target_fh, handle, total_size);
7927 
7928     return ret;
7929 }
7930 #endif
7931 
7932 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7933 
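/*
 * signalfd()/signalfd4() emulation: reject flags other than
 * O_NONBLOCK/O_CLOEXEC, convert the guest sigset and flag bits to host
 * values, and register an fd translator so that reads from the new
 * descriptor return signalfd_siginfo records in target format.
 */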
7934 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7935 {
7936     int host_flags;
7937     target_sigset_t *target_mask;
7938     sigset_t host_mask;
7939     abi_long ret;
7940 
7941     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7942         return -TARGET_EINVAL;
7943     }
7944     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7945         return -TARGET_EFAULT;
7946     }
7947 
7948     target_to_host_sigset(&host_mask, target_mask);
7949 
7950     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7951 
7952     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7953     if (ret >= 0) {
7954         fd_trans_register(ret, &target_signalfd_trans);
7955     }
7956 
7957     unlock_user_struct(target_mask, mask, 0);
7958 
7959     return ret;
7960 }
7961 #endif
7962 
7963 /* Map host to target signal numbers for the wait family of syscalls.
7964    Assume all other status bits are the same.  */
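/* For WIFSIGNALED the signal number lives in the low 7 bits, so only those
   bits are remapped and the rest (including the 0x80 core-dump flag) pass
   through unchanged; for WIFSTOPPED the stop signal lives in bits 8-15 and
   the low byte is preserved as-is.  */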
7965 int host_to_target_waitstatus(int status)
7966 {
7967     if (WIFSIGNALED(status)) {
7968         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7969     }
7970     if (WIFSTOPPED(status)) {
7971         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7972                | (status & 0xff);
7973     }
7974     return status;
7975 }
7976 
7977 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7978 {
7979     CPUState *cpu = env_cpu(cpu_env);
7980     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7981     int i;
7982 
7983     for (i = 0; i < bprm->argc; i++) {
7984         size_t len = strlen(bprm->argv[i]) + 1;
7985 
7986         if (write(fd, bprm->argv[i], len) != len) {
7987             return -1;
7988         }
7989     }
7990 
7991     return 0;
7992 }
7993 
7994 static int open_self_maps(CPUArchState *cpu_env, int fd)
7995 {
7996     CPUState *cpu = env_cpu(cpu_env);
7997     TaskState *ts = cpu->opaque;
7998     GSList *map_info = read_self_maps();
7999     GSList *s;
8000     int count;
8001 
8002     for (s = map_info; s; s = g_slist_next(s)) {
8003         MapInfo *e = (MapInfo *) s->data;
8004 
8005         if (h2g_valid(e->start)) {
8006             unsigned long min = e->start;
8007             unsigned long max = e->end;
8008             int flags = page_get_flags(h2g(min));
8009             const char *path;
8010 
8011             max = h2g_valid(max - 1) ?
8012                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8013 
8014             if (page_check_range(h2g(min), max - min, flags) == -1) {
8015                 continue;
8016             }
8017 
8018 #ifdef TARGET_HPPA
8019             if (h2g(max) == ts->info->stack_limit) {
8020 #else
8021             if (h2g(min) == ts->info->stack_limit) {
8022 #endif
8023                 path = "[stack]";
8024             } else {
8025                 path = e->path;
8026             }
8027 
8028             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8029                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8030                             h2g(min), h2g(max - 1) + 1,
8031                             (flags & PAGE_READ) ? 'r' : '-',
8032                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8033                             (flags & PAGE_EXEC) ? 'x' : '-',
8034                             e->is_priv ? 'p' : 's',
8035                             (uint64_t) e->offset, e->dev, e->inode);
8036             if (path) {
8037                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8038             } else {
8039                 dprintf(fd, "\n");
8040             }
8041         }
8042     }
8043 
8044     free_self_maps(map_info);
8045 
8046 #ifdef TARGET_VSYSCALL_PAGE
8047     /*
8048      * We only support execution from the vsyscall page.
8049      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8050      */
8051     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8052                     " --xp 00000000 00:00 0",
8053                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8054     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8055 #endif
8056 
8057     return 0;
8058 }
8059 
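/*
 * Synthesize /proc/self/stat for the guest.  Only the fields QEMU can
 * report meaningfully are filled in: pid, comm, ppid, starttime and the
 * stack start address; every other field is written as 0.
 */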
8060 static int open_self_stat(CPUArchState *cpu_env, int fd)
8061 {
8062     CPUState *cpu = env_cpu(cpu_env);
8063     TaskState *ts = cpu->opaque;
8064     g_autoptr(GString) buf = g_string_new(NULL);
8065     int i;
8066 
8067     for (i = 0; i < 44; i++) {
8068         if (i == 0) {
8069             /* pid */
8070             g_string_printf(buf, FMT_pid " ", getpid());
8071         } else if (i == 1) {
8072             /* app name */
8073             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8074             bin = bin ? bin + 1 : ts->bprm->argv[0];
8075             g_string_printf(buf, "(%.15s) ", bin);
8076         } else if (i == 3) {
8077             /* ppid */
8078             g_string_printf(buf, FMT_pid " ", getppid());
8079         } else if (i == 21) {
8080             /* starttime */
8081             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8082         } else if (i == 27) {
8083             /* stack bottom */
8084             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8085         } else {
8086             /* all other fields are not emulated; report them as zero */
8087             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8088         }
8089 
8090         if (write(fd, buf->str, buf->len) != buf->len) {
8091             return -1;
8092         }
8093     }
8094 
8095     return 0;
8096 }
8097 
8098 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8099 {
8100     CPUState *cpu = env_cpu(cpu_env);
8101     TaskState *ts = cpu->opaque;
8102     abi_ulong auxv = ts->info->saved_auxv;
8103     abi_ulong len = ts->info->auxv_len;
8104     char *ptr;
8105 
8106     /*
8107      * The auxiliary vector is stored on the target process's stack.
8108      * Read the whole vector and copy it out to the file descriptor.
8109      */
8110     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8111     if (ptr != NULL) {
8112         while (len > 0) {
8113             ssize_t r;
8114             r = write(fd, ptr, len);
8115             if (r <= 0) {
8116                 break;
8117             }
8118             len -= r;
8119             ptr += r;
8120         }
8121         lseek(fd, 0, SEEK_SET);
8122         unlock_user(ptr, auxv, len);
8123     }
8124 
8125     return 0;
8126 }
8127 
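/*
 * Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>" where <pid>
 * is our own pid, and 0 otherwise.
 */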
8128 static int is_proc_myself(const char *filename, const char *entry)
8129 {
8130     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8131         filename += strlen("/proc/");
8132         if (!strncmp(filename, "self/", strlen("self/"))) {
8133             filename += strlen("self/");
8134         } else if (*filename >= '1' && *filename <= '9') {
8135             char myself[80];
8136             snprintf(myself, sizeof(myself), "%d/", getpid());
8137             if (!strncmp(filename, myself, strlen(myself))) {
8138                 filename += strlen(myself);
8139             } else {
8140                 return 0;
8141             }
8142         } else {
8143             return 0;
8144         }
8145         if (!strcmp(filename, entry)) {
8146             return 1;
8147         }
8148     }
8149     return 0;
8150 }
8151 
8152 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8153                       const char *fmt, int code)
8154 {
8155     if (logfile) {
8156         CPUState *cs = env_cpu(env);
8157 
8158         fprintf(logfile, fmt, code);
8159         fprintf(logfile, "Failing executable: %s\n", exec_path);
8160         cpu_dump_state(cs, logfile, 0);
8161         open_self_maps(env, fileno(logfile));
8162     }
8163 }
8164 
8165 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8166 {
8167     /* dump to console */
8168     excp_dump_file(stderr, env, fmt, code);
8169 
8170     /* dump to log file */
8171     if (qemu_log_separate()) {
8172         FILE *logfile = qemu_log_trylock();
8173 
8174         excp_dump_file(logfile, env, fmt, code);
8175         qemu_log_unlock(logfile);
8176     }
8177 }
8178 
8179 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8180     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8181 static int is_proc(const char *filename, const char *entry)
8182 {
8183     return strcmp(filename, entry) == 0;
8184 }
8185 #endif
8186 
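/*
 * When host and target endianness differ, /proc/net/route cannot be passed
 * through verbatim: open_net_route() re-reads it line by line and re-emits
 * every route with the destination, gateway and netmask fields byte-swapped
 * for the guest.
 */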
8187 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8188 static int open_net_route(CPUArchState *cpu_env, int fd)
8189 {
8190     FILE *fp;
8191     char *line = NULL;
8192     size_t len = 0;
8193     ssize_t read;
8194 
8195     fp = fopen("/proc/net/route", "r");
8196     if (fp == NULL) {
8197         return -1;
8198     }
8199 
8200     /* read header */
8201 
8202     read = getline(&line, &len, fp);
8203     dprintf(fd, "%s", line);
8204 
8205     /* read routes */
8206 
8207     while ((read = getline(&line, &len, fp)) != -1) {
8208         char iface[16];
8209         uint32_t dest, gw, mask;
8210         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8211         int fields;
8212 
8213         fields = sscanf(line,
8214                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8215                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8216                         &mask, &mtu, &window, &irtt);
8217         if (fields != 11) {
8218             continue;
8219         }
8220         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8221                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8222                 metric, tswap32(mask), mtu, window, irtt);
8223     }
8224 
8225     free(line);
8226     fclose(fp);
8227 
8228     return 0;
8229 }
8230 #endif
8231 
8232 #if defined(TARGET_SPARC)
8233 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8234 {
8235     dprintf(fd, "type\t\t: sun4u\n");
8236     return 0;
8237 }
8238 #endif
8239 
8240 #if defined(TARGET_HPPA)
8241 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8242 {
8243     int i, num_cpus;
8244 
8245     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8246     for (i = 0; i < num_cpus; i++) {
8247         dprintf(fd, "processor\t: %d\n", i);
8248         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8249         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8250         dprintf(fd, "capabilities\t: os32\n");
8251         dprintf(fd, "model\t\t: 9000/778/B160L - "
8252                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8253     }
8254     return 0;
8255 }
8256 #endif
8257 
8258 #if defined(TARGET_M68K)
8259 static int open_hardware(CPUArchState *cpu_env, int fd)
8260 {
8261     dprintf(fd, "Model:\t\tqemu-m68k\n");
8262     return 0;
8263 }
8264 #endif
8265 
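/*
 * do_openat() intercepts a handful of /proc paths whose contents must be
 * generated by QEMU rather than read from the host.  A matching entry is
 * materialized in an anonymous memfd (or, if memfd_create() is unavailable,
 * an unlinked temporary file), filled by its fill() handler, and returned
 * with the file offset reset to the start.
 */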
8266 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8267 {
8268     struct fake_open {
8269         const char *filename;
8270         int (*fill)(CPUArchState *cpu_env, int fd);
8271         int (*cmp)(const char *s1, const char *s2);
8272     };
8273     const struct fake_open *fake_open;
8274     static const struct fake_open fakes[] = {
8275         { "maps", open_self_maps, is_proc_myself },
8276         { "stat", open_self_stat, is_proc_myself },
8277         { "auxv", open_self_auxv, is_proc_myself },
8278         { "cmdline", open_self_cmdline, is_proc_myself },
8279 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8280         { "/proc/net/route", open_net_route, is_proc },
8281 #endif
8282 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8283         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8284 #endif
8285 #if defined(TARGET_M68K)
8286         { "/proc/hardware", open_hardware, is_proc },
8287 #endif
8288         { NULL, NULL, NULL }
8289     };
8290 
8291     if (is_proc_myself(pathname, "exe")) {
8292         return safe_openat(dirfd, exec_path, flags, mode);
8293     }
8294 
8295     for (fake_open = fakes; fake_open->filename; fake_open++) {
8296         if (fake_open->cmp(pathname, fake_open->filename)) {
8297             break;
8298         }
8299     }
8300 
8301     if (fake_open->filename) {
8302         const char *tmpdir;
8303         char filename[PATH_MAX];
8304         int fd, r;
8305 
8306         fd = memfd_create("qemu-open", 0);
8307         if (fd < 0) {
8308             if (errno != ENOSYS) {
8309                 return fd;
8310             }
8311             /* create temporary file to map stat to */
8312             tmpdir = getenv("TMPDIR");
8313             if (!tmpdir)
8314                 tmpdir = "/tmp";
8315             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8316             fd = mkstemp(filename);
8317             if (fd < 0) {
8318                 return fd;
8319             }
8320             unlink(filename);
8321         }
8322 
8323         if ((r = fake_open->fill(cpu_env, fd))) {
8324             int e = errno;
8325             close(fd);
8326             errno = e;
8327             return r;
8328         }
8329         lseek(fd, 0, SEEK_SET);
8330 
8331         return fd;
8332     }
8333 
8334     return safe_openat(dirfd, path(pathname), flags, mode);
8335 }
8336 
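/*
 * execve()/execveat() emulation: walk the guest argv/envp arrays once to
 * count their entries, lock each string into host memory, build
 * NULL-terminated host vectors, and call safe_execveat(); "/proc/self/exe"
 * is redirected to the real executable path.
 */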
8337 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8338                        abi_long pathname, abi_long guest_argp,
8339                        abi_long guest_envp, int flags)
8340 {
8341     int ret;
8342     char **argp, **envp;
8343     int argc, envc;
8344     abi_ulong gp;
8345     abi_ulong addr;
8346     char **q;
8347     void *p;
8348 
8349     argc = 0;
8350 
8351     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8352         if (get_user_ual(addr, gp)) {
8353             return -TARGET_EFAULT;
8354         }
8355         if (!addr) {
8356             break;
8357         }
8358         argc++;
8359     }
8360     envc = 0;
8361     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8362         if (get_user_ual(addr, gp)) {
8363             return -TARGET_EFAULT;
8364         }
8365         if (!addr) {
8366             break;
8367         }
8368         envc++;
8369     }
8370 
8371     argp = g_new0(char *, argc + 1);
8372     envp = g_new0(char *, envc + 1);
8373 
8374     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8375         if (get_user_ual(addr, gp)) {
8376             goto execve_efault;
8377         }
8378         if (!addr) {
8379             break;
8380         }
8381         *q = lock_user_string(addr);
8382         if (!*q) {
8383             goto execve_efault;
8384         }
8385     }
8386     *q = NULL;
8387 
8388     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8389         if (get_user_ual(addr, gp)) {
8390             goto execve_efault;
8391         }
8392         if (!addr) {
8393             break;
8394         }
8395         *q = lock_user_string(addr);
8396         if (!*q) {
8397             goto execve_efault;
8398         }
8399     }
8400     *q = NULL;
8401 
8402     /*
8403      * Although execve() is not an interruptible syscall it is
8404      * a special case where we must use the safe_syscall wrapper:
8405      * if we allow a signal to happen before we make the host
8406      * syscall then we will 'lose' it, because at the point of
8407      * execve the process leaves QEMU's control. So we use the
8408      * safe syscall wrapper to ensure that we either take the
8409      * signal as a guest signal, or else it does not happen
8410      * before the execve completes and makes it the other
8411      * program's problem.
8412      */
8413     p = lock_user_string(pathname);
8414     if (!p) {
8415         goto execve_efault;
8416     }
8417 
8418     if (is_proc_myself(p, "exe")) {
8419         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8420     } else {
8421         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8422     }
8423 
8424     unlock_user(p, pathname, 0);
8425 
8426     goto execve_end;
8427 
8428 execve_efault:
8429     ret = -TARGET_EFAULT;
8430 
8431 execve_end:
8432     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8433         if (get_user_ual(addr, gp) || !addr) {
8434             break;
8435         }
8436         unlock_user(*q, addr, 0);
8437     }
8438     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8439         if (get_user_ual(addr, gp) || !addr) {
8440             break;
8441         }
8442         unlock_user(*q, addr, 0);
8443     }
8444 
8445     g_free(argp);
8446     g_free(envp);
8447     return ret;
8448 }
8449 
8450 #define TIMER_MAGIC 0x0caf0000
8451 #define TIMER_MAGIC_MASK 0xffff0000
8452 
8453 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8454 static target_timer_t get_timer_id(abi_long arg)
8455 {
8456     target_timer_t timerid = arg;
8457 
8458     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8459         return -TARGET_EINVAL;
8460     }
8461 
8462     timerid &= 0xffff;
8463 
8464     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8465         return -TARGET_EINVAL;
8466     }
8467 
8468     return timerid;
8469 }
8470 
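/*
 * The two helpers below repack CPU affinity masks bit by bit between the
 * guest's abi_ulong-sized words and the host's unsigned long words, so the
 * conversion remains correct even when the word sizes or byte orders
 * differ; __get_user()/__put_user() swap each mask word as needed.
 */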
8471 static int target_to_host_cpu_mask(unsigned long *host_mask,
8472                                    size_t host_size,
8473                                    abi_ulong target_addr,
8474                                    size_t target_size)
8475 {
8476     unsigned target_bits = sizeof(abi_ulong) * 8;
8477     unsigned host_bits = sizeof(*host_mask) * 8;
8478     abi_ulong *target_mask;
8479     unsigned i, j;
8480 
8481     assert(host_size >= target_size);
8482 
8483     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8484     if (!target_mask) {
8485         return -TARGET_EFAULT;
8486     }
8487     memset(host_mask, 0, host_size);
8488 
8489     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8490         unsigned bit = i * target_bits;
8491         abi_ulong val;
8492 
8493         __get_user(val, &target_mask[i]);
8494         for (j = 0; j < target_bits; j++, bit++) {
8495             if (val & (1UL << j)) {
8496                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8497             }
8498         }
8499     }
8500 
8501     unlock_user(target_mask, target_addr, 0);
8502     return 0;
8503 }
8504 
8505 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8506                                    size_t host_size,
8507                                    abi_ulong target_addr,
8508                                    size_t target_size)
8509 {
8510     unsigned target_bits = sizeof(abi_ulong) * 8;
8511     unsigned host_bits = sizeof(*host_mask) * 8;
8512     abi_ulong *target_mask;
8513     unsigned i, j;
8514 
8515     assert(host_size >= target_size);
8516 
8517     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8518     if (!target_mask) {
8519         return -TARGET_EFAULT;
8520     }
8521 
8522     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8523         unsigned bit = i * target_bits;
8524         abi_ulong val = 0;
8525 
8526         for (j = 0; j < target_bits; j++, bit++) {
8527             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8528                 val |= 1UL << j;
8529             }
8530         }
8531         __put_user(val, &target_mask[i]);
8532     }
8533 
8534     unlock_user(target_mask, target_addr, target_size);
8535     return 0;
8536 }
8537 
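/*
 * getdents() emulation: read host dirents into a scratch buffer and repack
 * them one record at a time into the guest's dirent layout.  Because a
 * target record may need more space than the host one, the converted stream
 * can outgrow the guest buffer; in that case the directory offset is
 * rewound to the first record that did not fit so the guest can retry.
 */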
8538 #ifdef TARGET_NR_getdents
8539 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8540 {
8541     g_autofree void *hdirp = NULL;
8542     void *tdirp;
8543     int hlen, hoff, toff;
8544     int hreclen, treclen;
8545     off64_t prev_diroff = 0;
8546 
8547     hdirp = g_try_malloc(count);
8548     if (!hdirp) {
8549         return -TARGET_ENOMEM;
8550     }
8551 
8552 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8553     hlen = sys_getdents(dirfd, hdirp, count);
8554 #else
8555     hlen = sys_getdents64(dirfd, hdirp, count);
8556 #endif
8557 
8558     hlen = get_errno(hlen);
8559     if (is_error(hlen)) {
8560         return hlen;
8561     }
8562 
8563     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8564     if (!tdirp) {
8565         return -TARGET_EFAULT;
8566     }
8567 
8568     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8569 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8570         struct linux_dirent *hde = hdirp + hoff;
8571 #else
8572         struct linux_dirent64 *hde = hdirp + hoff;
8573 #endif
8574         struct target_dirent *tde = tdirp + toff;
8575         int namelen;
8576         uint8_t type;
8577 
8578         namelen = strlen(hde->d_name);
8579         hreclen = hde->d_reclen;
8580         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8581         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8582 
8583         if (toff + treclen > count) {
8584             /*
8585              * If the host struct is smaller than the target struct, or
8586              * requires less alignment and thus packs into less space,
8587              * then the host can return more entries than we can pass
8588              * on to the guest.
8589              */
8590             if (toff == 0) {
8591                 toff = -TARGET_EINVAL; /* result buffer is too small */
8592                 break;
8593             }
8594             /*
8595              * Return what we have, resetting the file pointer to the
8596              * location of the first record not returned.
8597              */
8598             lseek64(dirfd, prev_diroff, SEEK_SET);
8599             break;
8600         }
8601 
8602         prev_diroff = hde->d_off;
8603         tde->d_ino = tswapal(hde->d_ino);
8604         tde->d_off = tswapal(hde->d_off);
8605         tde->d_reclen = tswap16(treclen);
8606         memcpy(tde->d_name, hde->d_name, namelen + 1);
8607 
8608         /*
8609          * The getdents type is in what was formerly a padding byte at the
8610          * end of the structure.
8611          */
8612 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8613         type = *((uint8_t *)hde + hreclen - 1);
8614 #else
8615         type = hde->d_type;
8616 #endif
8617         *((uint8_t *)tde + treclen - 1) = type;
8618     }
8619 
8620     unlock_user(tdirp, arg2, toff);
8621     return toff;
8622 }
8623 #endif /* TARGET_NR_getdents */
8624 
8625 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8626 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8627 {
8628     g_autofree void *hdirp = NULL;
8629     void *tdirp;
8630     int hlen, hoff, toff;
8631     int hreclen, treclen;
8632     off64_t prev_diroff = 0;
8633 
8634     hdirp = g_try_malloc(count);
8635     if (!hdirp) {
8636         return -TARGET_ENOMEM;
8637     }
8638 
8639     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8640     if (is_error(hlen)) {
8641         return hlen;
8642     }
8643 
8644     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8645     if (!tdirp) {
8646         return -TARGET_EFAULT;
8647     }
8648 
8649     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8650         struct linux_dirent64 *hde = hdirp + hoff;
8651         struct target_dirent64 *tde = tdirp + toff;
8652         int namelen;
8653 
8654         namelen = strlen(hde->d_name) + 1;
8655         hreclen = hde->d_reclen;
8656         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8657         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8658 
8659         if (toff + treclen > count) {
8660             /*
8661              * If the host struct is smaller than the target struct, or
8662              * requires less alignment and thus packs into less space,
8663              * then the host can return more entries than we can pass
8664              * on to the guest.
8665              */
8666             if (toff == 0) {
8667                 toff = -TARGET_EINVAL; /* result buffer is too small */
8668                 break;
8669             }
8670             /*
8671              * Return what we have, resetting the file pointer to the
8672              * location of the first record not returned.
8673              */
8674             lseek64(dirfd, prev_diroff, SEEK_SET);
8675             break;
8676         }
8677 
8678         prev_diroff = hde->d_off;
8679         tde->d_ino = tswap64(hde->d_ino);
8680         tde->d_off = tswap64(hde->d_off);
8681         tde->d_reclen = tswap16(treclen);
8682         tde->d_type = hde->d_type;
8683         memcpy(tde->d_name, hde->d_name, namelen);
8684     }
8685 
8686     unlock_user(tdirp, arg2, toff);
8687     return toff;
8688 }
8689 #endif /* TARGET_NR_getdents64 */
8690 
8691 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8692 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8693 #endif
8694 
8695 /* This is an internal helper for do_syscall that gives it a single
8696  * return point, so that actions such as logging of syscall results
8697  * can be performed in one place.
8698  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8699  */
8700 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8701                             abi_long arg2, abi_long arg3, abi_long arg4,
8702                             abi_long arg5, abi_long arg6, abi_long arg7,
8703                             abi_long arg8)
8704 {
8705     CPUState *cpu = env_cpu(cpu_env);
8706     abi_long ret;
8707 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8708     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8709     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8710     || defined(TARGET_NR_statx)
8711     struct stat st;
8712 #endif
8713 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8714     || defined(TARGET_NR_fstatfs)
8715     struct statfs stfs;
8716 #endif
8717     void *p;
8718 
8719     switch(num) {
8720     case TARGET_NR_exit:
8721         /* In old applications this may be used to implement _exit(2).
8722            However, in threaded applications it is used for thread termination,
8723            and _exit_group is used for application termination.
8724            Do thread termination if we have more than one thread.  */
8725 
8726         if (block_signals()) {
8727             return -QEMU_ERESTARTSYS;
8728         }
8729 
8730         pthread_mutex_lock(&clone_lock);
8731 
8732         if (CPU_NEXT(first_cpu)) {
8733             TaskState *ts = cpu->opaque;
8734 
8735             if (ts->child_tidptr) {
8736                 put_user_u32(0, ts->child_tidptr);
8737                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8738                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8739             }
8740 
8741             object_unparent(OBJECT(cpu));
8742             object_unref(OBJECT(cpu));
8743             /*
8744              * At this point the CPU should be unrealized and removed
8745              * from cpu lists. We can clean-up the rest of the thread
8746              * data without the lock held.
8747              */
8748 
8749             pthread_mutex_unlock(&clone_lock);
8750 
8751             thread_cpu = NULL;
8752             g_free(ts);
8753             rcu_unregister_thread();
8754             pthread_exit(NULL);
8755         }
8756 
8757         pthread_mutex_unlock(&clone_lock);
8758         preexit_cleanup(cpu_env, arg1);
8759         _exit(arg1);
8760         return 0; /* avoid warning */
8761     case TARGET_NR_read:
8762         if (arg2 == 0 && arg3 == 0) {
8763             return get_errno(safe_read(arg1, 0, 0));
8764         } else {
8765             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8766                 return -TARGET_EFAULT;
8767             ret = get_errno(safe_read(arg1, p, arg3));
8768             if (ret >= 0 &&
8769                 fd_trans_host_to_target_data(arg1)) {
8770                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8771             }
8772             unlock_user(p, arg2, ret);
8773         }
8774         return ret;
8775     case TARGET_NR_write:
8776         if (arg2 == 0 && arg3 == 0) {
8777             return get_errno(safe_write(arg1, 0, 0));
8778         }
8779         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8780             return -TARGET_EFAULT;
8781         if (fd_trans_target_to_host_data(arg1)) {
8782             void *copy = g_malloc(arg3);
8783             memcpy(copy, p, arg3);
8784             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8785             if (ret >= 0) {
8786                 ret = get_errno(safe_write(arg1, copy, ret));
8787             }
8788             g_free(copy);
8789         } else {
8790             ret = get_errno(safe_write(arg1, p, arg3));
8791         }
8792         unlock_user(p, arg2, 0);
8793         return ret;
8794 
8795 #ifdef TARGET_NR_open
8796     case TARGET_NR_open:
8797         if (!(p = lock_user_string(arg1)))
8798             return -TARGET_EFAULT;
8799         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8800                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8801                                   arg3));
8802         fd_trans_unregister(ret);
8803         unlock_user(p, arg1, 0);
8804         return ret;
8805 #endif
8806     case TARGET_NR_openat:
8807         if (!(p = lock_user_string(arg2)))
8808             return -TARGET_EFAULT;
8809         ret = get_errno(do_openat(cpu_env, arg1, p,
8810                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8811                                   arg4));
8812         fd_trans_unregister(ret);
8813         unlock_user(p, arg2, 0);
8814         return ret;
8815 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8816     case TARGET_NR_name_to_handle_at:
8817         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8818         return ret;
8819 #endif
8820 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8821     case TARGET_NR_open_by_handle_at:
8822         ret = do_open_by_handle_at(arg1, arg2, arg3);
8823         fd_trans_unregister(ret);
8824         return ret;
8825 #endif
8826 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8827     case TARGET_NR_pidfd_open:
8828         return get_errno(pidfd_open(arg1, arg2));
8829 #endif
8830 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8831     case TARGET_NR_pidfd_send_signal:
8832         {
8833             siginfo_t uinfo, *puinfo;
8834 
8835             if (arg3) {
8836                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8837                 if (!p) {
8838                     return -TARGET_EFAULT;
8839                 }
8840                 target_to_host_siginfo(&uinfo, p);
8841                 unlock_user(p, arg3, 0);
8842                 puinfo = &uinfo;
8843             } else {
8844                 puinfo = NULL;
8845             }
8846             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8847                                               puinfo, arg4));
8848         }
8849         return ret;
8850 #endif
8851 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8852     case TARGET_NR_pidfd_getfd:
8853         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8854 #endif
8855     case TARGET_NR_close:
8856         fd_trans_unregister(arg1);
8857         return get_errno(close(arg1));
8858 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8859     case TARGET_NR_close_range:
8860         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8861         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8862             abi_long fd, maxfd;
8863             maxfd = MIN(arg2, target_fd_max);
8864             for (fd = arg1; fd < maxfd; fd++) {
8865                 fd_trans_unregister(fd);
8866             }
8867         }
8868         return ret;
8869 #endif
8870 
8871     case TARGET_NR_brk:
8872         return do_brk(arg1);
8873 #ifdef TARGET_NR_fork
8874     case TARGET_NR_fork:
8875         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8876 #endif
8877 #ifdef TARGET_NR_waitpid
8878     case TARGET_NR_waitpid:
8879         {
8880             int status;
8881             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8882             if (!is_error(ret) && arg2 && ret
8883                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8884                 return -TARGET_EFAULT;
8885         }
8886         return ret;
8887 #endif
8888 #ifdef TARGET_NR_waitid
8889     case TARGET_NR_waitid:
8890         {
8891             siginfo_t info;
8892             info.si_pid = 0;
8893             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8894             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8895                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8896                     return -TARGET_EFAULT;
8897                 host_to_target_siginfo(p, &info);
8898                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8899             }
8900         }
8901         return ret;
8902 #endif
8903 #ifdef TARGET_NR_creat /* not on alpha */
8904     case TARGET_NR_creat:
8905         if (!(p = lock_user_string(arg1)))
8906             return -TARGET_EFAULT;
8907         ret = get_errno(creat(p, arg2));
8908         fd_trans_unregister(ret);
8909         unlock_user(p, arg1, 0);
8910         return ret;
8911 #endif
8912 #ifdef TARGET_NR_link
8913     case TARGET_NR_link:
8914         {
8915             void * p2;
8916             p = lock_user_string(arg1);
8917             p2 = lock_user_string(arg2);
8918             if (!p || !p2)
8919                 ret = -TARGET_EFAULT;
8920             else
8921                 ret = get_errno(link(p, p2));
8922             unlock_user(p2, arg2, 0);
8923             unlock_user(p, arg1, 0);
8924         }
8925         return ret;
8926 #endif
8927 #if defined(TARGET_NR_linkat)
8928     case TARGET_NR_linkat:
8929         {
8930             void * p2 = NULL;
8931             if (!arg2 || !arg4)
8932                 return -TARGET_EFAULT;
8933             p  = lock_user_string(arg2);
8934             p2 = lock_user_string(arg4);
8935             if (!p || !p2)
8936                 ret = -TARGET_EFAULT;
8937             else
8938                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8939             unlock_user(p, arg2, 0);
8940             unlock_user(p2, arg4, 0);
8941         }
8942         return ret;
8943 #endif
8944 #ifdef TARGET_NR_unlink
8945     case TARGET_NR_unlink:
8946         if (!(p = lock_user_string(arg1)))
8947             return -TARGET_EFAULT;
8948         ret = get_errno(unlink(p));
8949         unlock_user(p, arg1, 0);
8950         return ret;
8951 #endif
8952 #if defined(TARGET_NR_unlinkat)
8953     case TARGET_NR_unlinkat:
8954         if (!(p = lock_user_string(arg2)))
8955             return -TARGET_EFAULT;
8956         ret = get_errno(unlinkat(arg1, p, arg3));
8957         unlock_user(p, arg2, 0);
8958         return ret;
8959 #endif
8960     case TARGET_NR_execveat:
8961         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8962     case TARGET_NR_execve:
8963         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8964     case TARGET_NR_chdir:
8965         if (!(p = lock_user_string(arg1)))
8966             return -TARGET_EFAULT;
8967         ret = get_errno(chdir(p));
8968         unlock_user(p, arg1, 0);
8969         return ret;
8970 #ifdef TARGET_NR_time
8971     case TARGET_NR_time:
8972         {
8973             time_t host_time;
8974             ret = get_errno(time(&host_time));
8975             if (!is_error(ret)
8976                 && arg1
8977                 && put_user_sal(host_time, arg1))
8978                 return -TARGET_EFAULT;
8979         }
8980         return ret;
8981 #endif
8982 #ifdef TARGET_NR_mknod
8983     case TARGET_NR_mknod:
8984         if (!(p = lock_user_string(arg1)))
8985             return -TARGET_EFAULT;
8986         ret = get_errno(mknod(p, arg2, arg3));
8987         unlock_user(p, arg1, 0);
8988         return ret;
8989 #endif
8990 #if defined(TARGET_NR_mknodat)
8991     case TARGET_NR_mknodat:
8992         if (!(p = lock_user_string(arg2)))
8993             return -TARGET_EFAULT;
8994         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8995         unlock_user(p, arg2, 0);
8996         return ret;
8997 #endif
8998 #ifdef TARGET_NR_chmod
8999     case TARGET_NR_chmod:
9000         if (!(p = lock_user_string(arg1)))
9001             return -TARGET_EFAULT;
9002         ret = get_errno(chmod(p, arg2));
9003         unlock_user(p, arg1, 0);
9004         return ret;
9005 #endif
9006 #ifdef TARGET_NR_lseek
9007     case TARGET_NR_lseek:
9008         return get_errno(lseek(arg1, arg2, arg3));
9009 #endif
9010 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9011     /* Alpha specific */
9012     case TARGET_NR_getxpid:
9013         cpu_env->ir[IR_A4] = getppid();
9014         return get_errno(getpid());
9015 #endif
9016 #ifdef TARGET_NR_getpid
9017     case TARGET_NR_getpid:
9018         return get_errno(getpid());
9019 #endif
9020     case TARGET_NR_mount:
9021         {
9022             /* need to look at the data field */
9023             void *p2, *p3;
9024 
9025             if (arg1) {
9026                 p = lock_user_string(arg1);
9027                 if (!p) {
9028                     return -TARGET_EFAULT;
9029                 }
9030             } else {
9031                 p = NULL;
9032             }
9033 
9034             p2 = lock_user_string(arg2);
9035             if (!p2) {
9036                 if (arg1) {
9037                     unlock_user(p, arg1, 0);
9038                 }
9039                 return -TARGET_EFAULT;
9040             }
9041 
9042             if (arg3) {
9043                 p3 = lock_user_string(arg3);
9044                 if (!p3) {
9045                     if (arg1) {
9046                         unlock_user(p, arg1, 0);
9047                     }
9048                     unlock_user(p2, arg2, 0);
9049                     return -TARGET_EFAULT;
9050                 }
9051             } else {
9052                 p3 = NULL;
9053             }
9054 
9055             /* FIXME - arg5 should be locked, but it isn't clear how to
9056              * do that since it's not guaranteed to be a NULL-terminated
9057              * string.
9058              */
9059             if (!arg5) {
9060                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9061             } else {
9062                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9063             }
9064             ret = get_errno(ret);
9065 
9066             if (arg1) {
9067                 unlock_user(p, arg1, 0);
9068             }
9069             unlock_user(p2, arg2, 0);
9070             if (arg3) {
9071                 unlock_user(p3, arg3, 0);
9072             }
9073         }
9074         return ret;
9075 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9076 #if defined(TARGET_NR_umount)
9077     case TARGET_NR_umount:
9078 #endif
9079 #if defined(TARGET_NR_oldumount)
9080     case TARGET_NR_oldumount:
9081 #endif
9082         if (!(p = lock_user_string(arg1)))
9083             return -TARGET_EFAULT;
9084         ret = get_errno(umount(p));
9085         unlock_user(p, arg1, 0);
9086         return ret;
9087 #endif
9088 #ifdef TARGET_NR_stime /* not on alpha */
9089     case TARGET_NR_stime:
9090         {
9091             struct timespec ts;
9092             ts.tv_nsec = 0;
9093             if (get_user_sal(ts.tv_sec, arg1)) {
9094                 return -TARGET_EFAULT;
9095             }
9096             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9097         }
9098 #endif
9099 #ifdef TARGET_NR_alarm /* not on alpha */
9100     case TARGET_NR_alarm:
9101         return alarm(arg1);
9102 #endif
9103 #ifdef TARGET_NR_pause /* not on alpha */
9104     case TARGET_NR_pause:
9105         if (!block_signals()) {
9106             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9107         }
9108         return -TARGET_EINTR;
9109 #endif
9110 #ifdef TARGET_NR_utime
9111     case TARGET_NR_utime:
9112         {
9113             struct utimbuf tbuf, *host_tbuf;
9114             struct target_utimbuf *target_tbuf;
9115             if (arg2) {
9116                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9117                     return -TARGET_EFAULT;
9118                 tbuf.actime = tswapal(target_tbuf->actime);
9119                 tbuf.modtime = tswapal(target_tbuf->modtime);
9120                 unlock_user_struct(target_tbuf, arg2, 0);
9121                 host_tbuf = &tbuf;
9122             } else {
9123                 host_tbuf = NULL;
9124             }
9125             if (!(p = lock_user_string(arg1)))
9126                 return -TARGET_EFAULT;
9127             ret = get_errno(utime(p, host_tbuf));
9128             unlock_user(p, arg1, 0);
9129         }
9130         return ret;
9131 #endif
9132 #ifdef TARGET_NR_utimes
9133     case TARGET_NR_utimes:
9134         {
9135             struct timeval *tvp, tv[2];
9136             if (arg2) {
9137                 if (copy_from_user_timeval(&tv[0], arg2)
9138                     || copy_from_user_timeval(&tv[1],
9139                                               arg2 + sizeof(struct target_timeval)))
9140                     return -TARGET_EFAULT;
9141                 tvp = tv;
9142             } else {
9143                 tvp = NULL;
9144             }
9145             if (!(p = lock_user_string(arg1)))
9146                 return -TARGET_EFAULT;
9147             ret = get_errno(utimes(p, tvp));
9148             unlock_user(p, arg1, 0);
9149         }
9150         return ret;
9151 #endif
9152 #if defined(TARGET_NR_futimesat)
9153     case TARGET_NR_futimesat:
9154         {
9155             struct timeval *tvp, tv[2];
9156             if (arg3) {
9157                 if (copy_from_user_timeval(&tv[0], arg3)
9158                     || copy_from_user_timeval(&tv[1],
9159                                               arg3 + sizeof(struct target_timeval)))
9160                     return -TARGET_EFAULT;
9161                 tvp = tv;
9162             } else {
9163                 tvp = NULL;
9164             }
9165             if (!(p = lock_user_string(arg2))) {
9166                 return -TARGET_EFAULT;
9167             }
9168             ret = get_errno(futimesat(arg1, path(p), tvp));
9169             unlock_user(p, arg2, 0);
9170         }
9171         return ret;
9172 #endif
9173 #ifdef TARGET_NR_access
9174     case TARGET_NR_access:
9175         if (!(p = lock_user_string(arg1))) {
9176             return -TARGET_EFAULT;
9177         }
9178         ret = get_errno(access(path(p), arg2));
9179         unlock_user(p, arg1, 0);
9180         return ret;
9181 #endif
9182 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9183     case TARGET_NR_faccessat:
9184         if (!(p = lock_user_string(arg2))) {
9185             return -TARGET_EFAULT;
9186         }
9187         ret = get_errno(faccessat(arg1, p, arg3, 0));
9188         unlock_user(p, arg2, 0);
9189         return ret;
9190 #endif
9191 #if defined(TARGET_NR_faccessat2)
9192     case TARGET_NR_faccessat2:
9193         if (!(p = lock_user_string(arg2))) {
9194             return -TARGET_EFAULT;
9195         }
9196         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9197         unlock_user(p, arg2, 0);
9198         return ret;
9199 #endif
9200 #ifdef TARGET_NR_nice /* not on alpha */
9201     case TARGET_NR_nice:
9202         return get_errno(nice(arg1));
9203 #endif
9204     case TARGET_NR_sync:
9205         sync();
9206         return 0;
9207 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9208     case TARGET_NR_syncfs:
9209         return get_errno(syncfs(arg1));
9210 #endif
9211     case TARGET_NR_kill:
9212         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9213 #ifdef TARGET_NR_rename
9214     case TARGET_NR_rename:
9215         {
9216             void *p2;
9217             p = lock_user_string(arg1);
9218             p2 = lock_user_string(arg2);
9219             if (!p || !p2)
9220                 ret = -TARGET_EFAULT;
9221             else
9222                 ret = get_errno(rename(p, p2));
9223             unlock_user(p2, arg2, 0);
9224             unlock_user(p, arg1, 0);
9225         }
9226         return ret;
9227 #endif
9228 #if defined(TARGET_NR_renameat)
9229     case TARGET_NR_renameat:
9230         {
9231             void *p2;
9232             p  = lock_user_string(arg2);
9233             p2 = lock_user_string(arg4);
9234             if (!p || !p2)
9235                 ret = -TARGET_EFAULT;
9236             else
9237                 ret = get_errno(renameat(arg1, p, arg3, p2));
9238             unlock_user(p2, arg4, 0);
9239             unlock_user(p, arg2, 0);
9240         }
9241         return ret;
9242 #endif
9243 #if defined(TARGET_NR_renameat2)
9244     case TARGET_NR_renameat2:
9245         {
9246             void *p2;
9247             p  = lock_user_string(arg2);
9248             p2 = lock_user_string(arg4);
9249             if (!p || !p2) {
9250                 ret = -TARGET_EFAULT;
9251             } else {
9252                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9253             }
9254             unlock_user(p2, arg4, 0);
9255             unlock_user(p, arg2, 0);
9256         }
9257         return ret;
9258 #endif
9259 #ifdef TARGET_NR_mkdir
9260     case TARGET_NR_mkdir:
9261         if (!(p = lock_user_string(arg1)))
9262             return -TARGET_EFAULT;
9263         ret = get_errno(mkdir(p, arg2));
9264         unlock_user(p, arg1, 0);
9265         return ret;
9266 #endif
9267 #if defined(TARGET_NR_mkdirat)
9268     case TARGET_NR_mkdirat:
9269         if (!(p = lock_user_string(arg2)))
9270             return -TARGET_EFAULT;
9271         ret = get_errno(mkdirat(arg1, p, arg3));
9272         unlock_user(p, arg2, 0);
9273         return ret;
9274 #endif
9275 #ifdef TARGET_NR_rmdir
9276     case TARGET_NR_rmdir:
9277         if (!(p = lock_user_string(arg1)))
9278             return -TARGET_EFAULT;
9279         ret = get_errno(rmdir(p));
9280         unlock_user(p, arg1, 0);
9281         return ret;
9282 #endif
9283     case TARGET_NR_dup:
9284         ret = get_errno(dup(arg1));
9285         if (ret >= 0) {
9286             fd_trans_dup(arg1, ret);
9287         }
9288         return ret;
9289 #ifdef TARGET_NR_pipe
9290     case TARGET_NR_pipe:
9291         return do_pipe(cpu_env, arg1, 0, 0);
9292 #endif
9293 #ifdef TARGET_NR_pipe2
9294     case TARGET_NR_pipe2:
9295         return do_pipe(cpu_env, arg1,
9296                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9297 #endif
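    /*
     * Both pipe flavours go through do_pipe(); pipe2's flags are first
     * translated to host values, and the final argument tells do_pipe()
     * whether it is servicing pipe2 (1) or plain pipe (0).
     */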
9298     case TARGET_NR_times:
9299         {
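            /*
             * Both the four tms fields and the syscall's return value are
             * clock_t values and need host-to-target conversion.
             */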
9300             struct target_tms *tmsp;
9301             struct tms tms;
9302             ret = get_errno(times(&tms));
9303             if (arg1) {
9304                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9305                 if (!tmsp)
9306                     return -TARGET_EFAULT;
9307                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9308                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9309                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9310                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9311             }
9312             if (!is_error(ret))
9313                 ret = host_to_target_clock_t(ret);
9314         }
9315         return ret;
9316     case TARGET_NR_acct:
9317         if (arg1 == 0) {
9318             ret = get_errno(acct(NULL));
9319         } else {
9320             if (!(p = lock_user_string(arg1))) {
9321                 return -TARGET_EFAULT;
9322             }
9323             ret = get_errno(acct(path(p)));
9324             unlock_user(p, arg1, 0);
9325         }
9326         return ret;
9327 #ifdef TARGET_NR_umount2
9328     case TARGET_NR_umount2:
9329         if (!(p = lock_user_string(arg1)))
9330             return -TARGET_EFAULT;
9331         ret = get_errno(umount2(p, arg2));
9332         unlock_user(p, arg1, 0);
9333         return ret;
9334 #endif
9335     case TARGET_NR_ioctl:
9336         return do_ioctl(arg1, arg2, arg3);
9337 #ifdef TARGET_NR_fcntl
9338     case TARGET_NR_fcntl:
9339         return do_fcntl(arg1, arg2, arg3);
9340 #endif
9341     case TARGET_NR_setpgid:
9342         return get_errno(setpgid(arg1, arg2));
9343     case TARGET_NR_umask:
9344         return get_errno(umask(arg1));
9345     case TARGET_NR_chroot:
9346         if (!(p = lock_user_string(arg1)))
9347             return -TARGET_EFAULT;
9348         ret = get_errno(chroot(p));
9349         unlock_user(p, arg1, 0);
9350         return ret;
9351 #ifdef TARGET_NR_dup2
9352     case TARGET_NR_dup2:
9353         ret = get_errno(dup2(arg1, arg2));
9354         if (ret >= 0) {
9355             fd_trans_dup(arg1, arg2);
9356         }
9357         return ret;
9358 #endif
9359 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9360     case TARGET_NR_dup3:
9361     {
9362         int host_flags;
9363 
9364         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9365             return -TARGET_EINVAL;
9366         }
9367         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9368         ret = get_errno(dup3(arg1, arg2, host_flags));
9369         if (ret >= 0) {
9370             fd_trans_dup(arg1, arg2);
9371         }
9372         return ret;
9373     }
9374 #endif
9375 #ifdef TARGET_NR_getppid /* not on alpha */
9376     case TARGET_NR_getppid:
9377         return get_errno(getppid());
9378 #endif
9379 #ifdef TARGET_NR_getpgrp
9380     case TARGET_NR_getpgrp:
9381         return get_errno(getpgrp());
9382 #endif
9383     case TARGET_NR_setsid:
9384         return get_errno(setsid());
9385 #ifdef TARGET_NR_sigaction
9386     case TARGET_NR_sigaction:
9387         {
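            /*
             * Old-style sigaction: the guest passes the single-word mask
             * variant of the structure.  MIPS is handled separately below
             * because its old sigaction already uses the new struct layout,
             * of which only sa_mask.sig[0] is significant here.
             */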
9388 #if defined(TARGET_MIPS)
9389             struct target_sigaction act, oact, *pact, *old_act;
9390 
9391             if (arg2) {
9392                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9393                     return -TARGET_EFAULT;
9394                 act._sa_handler = old_act->_sa_handler;
9395                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9396                 act.sa_flags = old_act->sa_flags;
9397                 unlock_user_struct(old_act, arg2, 0);
9398                 pact = &act;
9399             } else {
9400                 pact = NULL;
9401             }
9402 
9403             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9404 
9405             if (!is_error(ret) && arg3) {
9406                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9407                     return -TARGET_EFAULT;
9408                 old_act->_sa_handler = oact._sa_handler;
9409                 old_act->sa_flags = oact.sa_flags;
9410                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9411                 old_act->sa_mask.sig[1] = 0;
9412                 old_act->sa_mask.sig[2] = 0;
9413                 old_act->sa_mask.sig[3] = 0;
9414                 unlock_user_struct(old_act, arg3, 1);
9415             }
9416 #else
9417             struct target_old_sigaction *old_act;
9418             struct target_sigaction act, oact, *pact;
9419             if (arg2) {
9420                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9421                     return -TARGET_EFAULT;
9422                 act._sa_handler = old_act->_sa_handler;
9423                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9424                 act.sa_flags = old_act->sa_flags;
9425 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9426                 act.sa_restorer = old_act->sa_restorer;
9427 #endif
9428                 unlock_user_struct(old_act, arg2, 0);
9429                 pact = &act;
9430             } else {
9431                 pact = NULL;
9432             }
9433             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9434             if (!is_error(ret) && arg3) {
9435                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9436                     return -TARGET_EFAULT;
9437                 old_act->_sa_handler = oact._sa_handler;
9438                 old_act->sa_mask = oact.sa_mask.sig[0];
9439                 old_act->sa_flags = oact.sa_flags;
9440 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9441                 old_act->sa_restorer = oact.sa_restorer;
9442 #endif
9443                 unlock_user_struct(old_act, arg3, 1);
9444             }
9445 #endif
9446         }
9447         return ret;
9448 #endif
9449     case TARGET_NR_rt_sigaction:
9450         {
9451             /*
9452              * For Alpha and SPARC this is a 5 argument syscall, with
9453              * a 'restorer' parameter which must be copied into the
9454              * sa_restorer field of the sigaction struct.
9455              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9456              * and arg5 is the sigsetsize.
9457              */
9458 #if defined(TARGET_ALPHA)
9459             target_ulong sigsetsize = arg4;
9460             target_ulong restorer = arg5;
9461 #elif defined(TARGET_SPARC)
9462             target_ulong restorer = arg4;
9463             target_ulong sigsetsize = arg5;
9464 #else
9465             target_ulong sigsetsize = arg4;
9466             target_ulong restorer = 0;
9467 #endif
9468             struct target_sigaction *act = NULL;
9469             struct target_sigaction *oact = NULL;
9470 
9471             if (sigsetsize != sizeof(target_sigset_t)) {
9472                 return -TARGET_EINVAL;
9473             }
9474             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9475                 return -TARGET_EFAULT;
9476             }
9477             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9478                 ret = -TARGET_EFAULT;
9479             } else {
9480                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9481                 if (oact) {
9482                     unlock_user_struct(oact, arg3, 1);
9483                 }
9484             }
9485             if (act) {
9486                 unlock_user_struct(act, arg2, 0);
9487             }
9488         }
9489         return ret;
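    /*
     * sgetmask/ssetmask are the legacy single-word signal mask calls;
     * both are implemented on top of do_sigprocmask() using the old
     * (one-word) sigset conversion helpers.
     */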
9490 #ifdef TARGET_NR_sgetmask /* not on alpha */
9491     case TARGET_NR_sgetmask:
9492         {
9493             sigset_t cur_set;
9494             abi_ulong target_set;
9495             ret = do_sigprocmask(0, NULL, &cur_set);
9496             if (!ret) {
9497                 host_to_target_old_sigset(&target_set, &cur_set);
9498                 ret = target_set;
9499             }
9500         }
9501         return ret;
9502 #endif
9503 #ifdef TARGET_NR_ssetmask /* not on alpha */
9504     case TARGET_NR_ssetmask:
9505         {
9506             sigset_t set, oset;
9507             abi_ulong target_set = arg1;
9508             target_to_host_old_sigset(&set, &target_set);
9509             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9510             if (!ret) {
9511                 host_to_target_old_sigset(&target_set, &oset);
9512                 ret = target_set;
9513             }
9514         }
9515         return ret;
9516 #endif
9517 #ifdef TARGET_NR_sigprocmask
9518     case TARGET_NR_sigprocmask:
9519         {
9520 #if defined(TARGET_ALPHA)
9521             sigset_t set, oldset;
9522             abi_ulong mask;
9523             int how;
9524 
9525             switch (arg1) {
9526             case TARGET_SIG_BLOCK:
9527                 how = SIG_BLOCK;
9528                 break;
9529             case TARGET_SIG_UNBLOCK:
9530                 how = SIG_UNBLOCK;
9531                 break;
9532             case TARGET_SIG_SETMASK:
9533                 how = SIG_SETMASK;
9534                 break;
9535             default:
9536                 return -TARGET_EINVAL;
9537             }
9538             mask = arg2;
9539             target_to_host_old_sigset(&set, &mask);
9540 
9541             ret = do_sigprocmask(how, &set, &oldset);
9542             if (!is_error(ret)) {
9543                 host_to_target_old_sigset(&mask, &oldset);
9544                 ret = mask;
9545                 cpu_env->ir[IR_V0] = 0; /* force no error */
9546             }
9547 #else
9548             sigset_t set, oldset, *set_ptr;
9549             int how;
9550 
9551             if (arg2) {
9552                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9553                 if (!p) {
9554                     return -TARGET_EFAULT;
9555                 }
9556                 target_to_host_old_sigset(&set, p);
9557                 unlock_user(p, arg2, 0);
9558                 set_ptr = &set;
9559                 switch (arg1) {
9560                 case TARGET_SIG_BLOCK:
9561                     how = SIG_BLOCK;
9562                     break;
9563                 case TARGET_SIG_UNBLOCK:
9564                     how = SIG_UNBLOCK;
9565                     break;
9566                 case TARGET_SIG_SETMASK:
9567                     how = SIG_SETMASK;
9568                     break;
9569                 default:
9570                     return -TARGET_EINVAL;
9571                 }
9572             } else {
9573                 how = 0;
9574                 set_ptr = NULL;
9575             }
9576             ret = do_sigprocmask(how, set_ptr, &oldset);
9577             if (!is_error(ret) && arg3) {
9578                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9579                     return -TARGET_EFAULT;
9580                 host_to_target_old_sigset(p, &oldset);
9581                 unlock_user(p, arg3, sizeof(target_sigset_t));
9582             }
9583 #endif
9584         }
9585         return ret;
9586 #endif
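    /*
     * rt_sigprocmask additionally requires the guest-supplied sigsetsize
     * (arg4) to match the emulated target_sigset_t before the masks are
     * converted.
     */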
9587     case TARGET_NR_rt_sigprocmask:
9588         {
9589             int how = arg1;
9590             sigset_t set, oldset, *set_ptr;
9591 
9592             if (arg4 != sizeof(target_sigset_t)) {
9593                 return -TARGET_EINVAL;
9594             }
9595 
9596             if (arg2) {
9597                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9598                 if (!p) {
9599                     return -TARGET_EFAULT;
9600                 }
9601                 target_to_host_sigset(&set, p);
9602                 unlock_user(p, arg2, 0);
9603                 set_ptr = &set;
9604                 switch(how) {
9605                 case TARGET_SIG_BLOCK:
9606                     how = SIG_BLOCK;
9607                     break;
9608                 case TARGET_SIG_UNBLOCK:
9609                     how = SIG_UNBLOCK;
9610                     break;
9611                 case TARGET_SIG_SETMASK:
9612                     how = SIG_SETMASK;
9613                     break;
9614                 default:
9615                     return -TARGET_EINVAL;
9616                 }
9617             } else {
9618                 how = 0;
9619                 set_ptr = NULL;
9620             }
9621             ret = do_sigprocmask(how, set_ptr, &oldset);
9622             if (!is_error(ret) && arg3) {
9623                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9624                     return -TARGET_EFAULT;
9625                 host_to_target_sigset(p, &oldset);
9626                 unlock_user(p, arg3, sizeof(target_sigset_t));
9627             }
9628         }
9629         return ret;
9630 #ifdef TARGET_NR_sigpending
9631     case TARGET_NR_sigpending:
9632         {
9633             sigset_t set;
9634             ret = get_errno(sigpending(&set));
9635             if (!is_error(ret)) {
9636                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9637                     return -TARGET_EFAULT;
9638                 host_to_target_old_sigset(p, &set);
9639                 unlock_user(p, arg1, sizeof(target_sigset_t));
9640             }
9641         }
9642         return ret;
9643 #endif
9644     case TARGET_NR_rt_sigpending:
9645         {
9646             sigset_t set;
9647 
9648             /* Yes, this check is >, not != like most. We follow the kernel's
9649              * logic and it does it like this because it implements
9650              * NR_sigpending through the same code path, and in that case
9651              * the old_sigset_t is smaller in size.
9652              */
9653             if (arg2 > sizeof(target_sigset_t)) {
9654                 return -TARGET_EINVAL;
9655             }
9656 
9657             ret = get_errno(sigpending(&set));
9658             if (!is_error(ret)) {
9659                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9660                     return -TARGET_EFAULT;
9661                 host_to_target_sigset(p, &set);
9662                 unlock_user(p, arg1, sizeof(target_sigset_t));
9663             }
9664         }
9665         return ret;
9666 #ifdef TARGET_NR_sigsuspend
9667     case TARGET_NR_sigsuspend:
9668         {
9669             sigset_t *set;
9670 
9671 #if defined(TARGET_ALPHA)
9672             TaskState *ts = cpu->opaque;
9673             /* target_to_host_old_sigset will bswap back */
9674             abi_ulong mask = tswapal(arg1);
9675             set = &ts->sigsuspend_mask;
9676             target_to_host_old_sigset(set, &mask);
9677 #else
9678             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9679             if (ret != 0) {
9680                 return ret;
9681             }
9682 #endif
9683             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9684             finish_sigsuspend_mask(ret);
9685         }
9686         return ret;
9687 #endif
9688     case TARGET_NR_rt_sigsuspend:
9689         {
9690             sigset_t *set;
9691 
9692             ret = process_sigsuspend_mask(&set, arg1, arg2);
9693             if (ret != 0) {
9694                 return ret;
9695             }
9696             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9697             finish_sigsuspend_mask(ret);
9698         }
9699         return ret;
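    /*
     * For the sigtimedwait variants the signal number returned by the host
     * is translated back to the target numbering before being handed to
     * the guest, and the siginfo is converted only on success.
     */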
9700 #ifdef TARGET_NR_rt_sigtimedwait
9701     case TARGET_NR_rt_sigtimedwait:
9702         {
9703             sigset_t set;
9704             struct timespec uts, *puts;
9705             siginfo_t uinfo;
9706 
9707             if (arg4 != sizeof(target_sigset_t)) {
9708                 return -TARGET_EINVAL;
9709             }
9710 
9711             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9712                 return -TARGET_EFAULT;
9713             target_to_host_sigset(&set, p);
9714             unlock_user(p, arg1, 0);
9715             if (arg3) {
9716                 puts = &uts;
9717                 if (target_to_host_timespec(puts, arg3)) {
9718                     return -TARGET_EFAULT;
9719                 }
9720             } else {
9721                 puts = NULL;
9722             }
9723             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9724                                                  SIGSET_T_SIZE));
9725             if (!is_error(ret)) {
9726                 if (arg2) {
9727                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9728                                   0);
9729                     if (!p) {
9730                         return -TARGET_EFAULT;
9731                     }
9732                     host_to_target_siginfo(p, &uinfo);
9733                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9734                 }
9735                 ret = host_to_target_signal(ret);
9736             }
9737         }
9738         return ret;
9739 #endif
9740 #ifdef TARGET_NR_rt_sigtimedwait_time64
9741     case TARGET_NR_rt_sigtimedwait_time64:
9742         {
9743             sigset_t set;
9744             struct timespec uts, *puts;
9745             siginfo_t uinfo;
9746 
9747             if (arg4 != sizeof(target_sigset_t)) {
9748                 return -TARGET_EINVAL;
9749             }
9750 
9751             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9752             if (!p) {
9753                 return -TARGET_EFAULT;
9754             }
9755             target_to_host_sigset(&set, p);
9756             unlock_user(p, arg1, 0);
9757             if (arg3) {
9758                 puts = &uts;
9759                 if (target_to_host_timespec64(puts, arg3)) {
9760                     return -TARGET_EFAULT;
9761                 }
9762             } else {
9763                 puts = NULL;
9764             }
9765             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9766                                                  SIGSET_T_SIZE));
9767             if (!is_error(ret)) {
9768                 if (arg2) {
9769                     p = lock_user(VERIFY_WRITE, arg2,
9770                                   sizeof(target_siginfo_t), 0);
9771                     if (!p) {
9772                         return -TARGET_EFAULT;
9773                     }
9774                     host_to_target_siginfo(p, &uinfo);
9775                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9776                 }
9777                 ret = host_to_target_signal(ret);
9778             }
9779         }
9780         return ret;
9781 #endif
9782     case TARGET_NR_rt_sigqueueinfo:
9783         {
9784             siginfo_t uinfo;
9785 
9786             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9787             if (!p) {
9788                 return -TARGET_EFAULT;
9789             }
9790             target_to_host_siginfo(&uinfo, p);
9791             unlock_user(p, arg3, 0);
9792             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9793         }
9794         return ret;
9795     case TARGET_NR_rt_tgsigqueueinfo:
9796         {
9797             siginfo_t uinfo;
9798 
9799             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9800             if (!p) {
9801                 return -TARGET_EFAULT;
9802             }
9803             target_to_host_siginfo(&uinfo, p);
9804             unlock_user(p, arg4, 0);
9805             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9806         }
9807         return ret;
9808 #ifdef TARGET_NR_sigreturn
9809     case TARGET_NR_sigreturn:
9810         if (block_signals()) {
9811             return -QEMU_ERESTARTSYS;
9812         }
9813         return do_sigreturn(cpu_env);
9814 #endif
9815     case TARGET_NR_rt_sigreturn:
9816         if (block_signals()) {
9817             return -QEMU_ERESTARTSYS;
9818         }
9819         return do_rt_sigreturn(cpu_env);
9820     case TARGET_NR_sethostname:
9821         if (!(p = lock_user_string(arg1)))
9822             return -TARGET_EFAULT;
9823         ret = get_errno(sethostname(p, arg2));
9824         unlock_user(p, arg1, 0);
9825         return ret;
9826 #ifdef TARGET_NR_setrlimit
9827     case TARGET_NR_setrlimit:
9828         {
9829             int resource = target_to_host_resource(arg1);
9830             struct target_rlimit *target_rlim;
9831             struct rlimit rlim;
9832             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9833                 return -TARGET_EFAULT;
9834             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9835             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9836             unlock_user_struct(target_rlim, arg2, 0);
9837             /*
9838              * If we just passed through resource limit settings for memory then
9839              * they would also apply to QEMU's own allocations, and QEMU will
9840              * crash or hang or die if its allocations fail. Ideally we would
9841              * track the guest allocations in QEMU and apply the limits ourselves.
9842              * For now, just tell the guest the call succeeded but don't actually
9843              * limit anything.
9844              */
9845             if (resource != RLIMIT_AS &&
9846                 resource != RLIMIT_DATA &&
9847                 resource != RLIMIT_STACK) {
9848                 return get_errno(setrlimit(resource, &rlim));
9849             } else {
9850                 return 0;
9851             }
9852         }
9853 #endif
9854 #ifdef TARGET_NR_getrlimit
9855     case TARGET_NR_getrlimit:
9856         {
9857             int resource = target_to_host_resource(arg1);
9858             struct target_rlimit *target_rlim;
9859             struct rlimit rlim;
9860 
9861             ret = get_errno(getrlimit(resource, &rlim));
9862             if (!is_error(ret)) {
9863                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9864                     return -TARGET_EFAULT;
9865                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9866                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9867                 unlock_user_struct(target_rlim, arg2, 1);
9868             }
9869         }
9870         return ret;
9871 #endif
9872     case TARGET_NR_getrusage:
9873         {
9874             struct rusage rusage;
9875             ret = get_errno(getrusage(arg1, &rusage));
9876             if (!is_error(ret)) {
9877                 ret = host_to_target_rusage(arg2, &rusage);
9878             }
9879         }
9880         return ret;
9881 #if defined(TARGET_NR_gettimeofday)
9882     case TARGET_NR_gettimeofday:
9883         {
9884             struct timeval tv;
9885             struct timezone tz;
9886 
9887             ret = get_errno(gettimeofday(&tv, &tz));
9888             if (!is_error(ret)) {
9889                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9890                     return -TARGET_EFAULT;
9891                 }
9892                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9893                     return -TARGET_EFAULT;
9894                 }
9895             }
9896         }
9897         return ret;
9898 #endif
9899 #if defined(TARGET_NR_settimeofday)
9900     case TARGET_NR_settimeofday:
9901         {
9902             struct timeval tv, *ptv = NULL;
9903             struct timezone tz, *ptz = NULL;
9904 
9905             if (arg1) {
9906                 if (copy_from_user_timeval(&tv, arg1)) {
9907                     return -TARGET_EFAULT;
9908                 }
9909                 ptv = &tv;
9910             }
9911 
9912             if (arg2) {
9913                 if (copy_from_user_timezone(&tz, arg2)) {
9914                     return -TARGET_EFAULT;
9915                 }
9916                 ptz = &tz;
9917             }
9918 
9919             return get_errno(settimeofday(ptv, ptz));
9920         }
9921 #endif
9922 #if defined(TARGET_NR_select)
9923     case TARGET_NR_select:
9924 #if defined(TARGET_WANT_NI_OLD_SELECT)
9925         /* some architectures used to have old_select here
9926          * but now return -ENOSYS for it.
9927          */
9928         ret = -TARGET_ENOSYS;
9929 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9930         ret = do_old_select(arg1);
9931 #else
9932         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9933 #endif
9934         return ret;
9935 #endif
9936 #ifdef TARGET_NR_pselect6
9937     case TARGET_NR_pselect6:
9938         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9939 #endif
9940 #ifdef TARGET_NR_pselect6_time64
9941     case TARGET_NR_pselect6_time64:
9942         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9943 #endif
9944 #ifdef TARGET_NR_symlink
9945     case TARGET_NR_symlink:
9946         {
9947             void *p2;
9948             p = lock_user_string(arg1);
9949             p2 = lock_user_string(arg2);
9950             if (!p || !p2)
9951                 ret = -TARGET_EFAULT;
9952             else
9953                 ret = get_errno(symlink(p, p2));
9954             unlock_user(p2, arg2, 0);
9955             unlock_user(p, arg1, 0);
9956         }
9957         return ret;
9958 #endif
9959 #if defined(TARGET_NR_symlinkat)
9960     case TARGET_NR_symlinkat:
9961         {
9962             void *p2;
9963             p  = lock_user_string(arg1);
9964             p2 = lock_user_string(arg3);
9965             if (!p || !p2)
9966                 ret = -TARGET_EFAULT;
9967             else
9968                 ret = get_errno(symlinkat(p, arg2, p2));
9969             unlock_user(p2, arg3, 0);
9970             unlock_user(p, arg1, 0);
9971         }
9972         return ret;
9973 #endif
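    /*
     * readlink/readlinkat intercept reads of the emulated process's
     * /proc/.../exe link so that the guest sees the path of the binary
     * being emulated rather than that of QEMU itself.
     */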
9974 #ifdef TARGET_NR_readlink
9975     case TARGET_NR_readlink:
9976         {
9977             void *p2;
9978             p = lock_user_string(arg1);
9979             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9980             if (!p || !p2) {
9981                 ret = -TARGET_EFAULT;
9982             } else if (!arg3) {
9983                 /* Short circuit this for the magic exe check. */
9984                 ret = -TARGET_EINVAL;
9985             } else if (is_proc_myself((const char *)p, "exe")) {
9986                 char real[PATH_MAX], *temp;
9987                 temp = realpath(exec_path, real);
9988                 /* Return value is # of bytes that we wrote to the buffer. */
9989                 if (temp == NULL) {
9990                     ret = get_errno(-1);
9991                 } else {
9992                     /* Don't worry about sign mismatch as earlier mapping
9993                      * logic would have thrown a bad address error. */
9994                     ret = MIN(strlen(real), arg3);
9995                     /* We cannot NUL terminate the string. */
9996                     memcpy(p2, real, ret);
9997                 }
9998             } else {
9999                 ret = get_errno(readlink(path(p), p2, arg3));
10000             }
10001             unlock_user(p2, arg2, ret);
10002             unlock_user(p, arg1, 0);
10003         }
10004         return ret;
10005 #endif
10006 #if defined(TARGET_NR_readlinkat)
10007     case TARGET_NR_readlinkat:
10008         {
10009             void *p2;
10010             p  = lock_user_string(arg2);
10011             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10012             if (!p || !p2) {
10013                 ret = -TARGET_EFAULT;
10014             } else if (!arg4) {
10015                 /* Short circuit this for the magic exe check. */
10016                 ret = -TARGET_EINVAL;
10017             } else if (is_proc_myself((const char *)p, "exe")) {
10018                 char real[PATH_MAX], *temp;
10019                 temp = realpath(exec_path, real);
10020                 /* Return value is # of bytes that we wrote to the buffer. */
10021                 if (temp == NULL) {
10022                     ret = get_errno(-1);
10023                 } else {
10024                     /* Don't worry about sign mismatch as earlier mapping
10025                      * logic would have thrown a bad address error. */
10026                     ret = MIN(strlen(real), arg4);
10027                     /* We cannot NUL terminate the string. */
10028                     memcpy(p2, real, ret);
10029                 }
10030             } else {
10031                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10032             }
10033             unlock_user(p2, arg3, ret);
10034             unlock_user(p, arg2, 0);
10035         }
10036         return ret;
10037 #endif
10038 #ifdef TARGET_NR_swapon
10039     case TARGET_NR_swapon:
10040         if (!(p = lock_user_string(arg1)))
10041             return -TARGET_EFAULT;
10042         ret = get_errno(swapon(p, arg2));
10043         unlock_user(p, arg1, 0);
10044         return ret;
10045 #endif
10046     case TARGET_NR_reboot:
10047         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10048            /* arg4 is only used for RESTART2; ignore it in all other cases */
10049            p = lock_user_string(arg4);
10050            if (!p) {
10051                return -TARGET_EFAULT;
10052            }
10053            ret = get_errno(reboot(arg1, arg2, arg3, p));
10054            unlock_user(p, arg4, 0);
10055         } else {
10056            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10057         }
10058         return ret;
10059 #ifdef TARGET_NR_mmap
10060     case TARGET_NR_mmap:
10061 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10062     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10063     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10064     || defined(TARGET_S390X)
10065         {
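            /*
             * Legacy targets pass mmap's six arguments as a block of
             * abi_ulongs in guest memory pointed to by arg1; unpack them
             * before calling target_mmap().
             */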
10066             abi_ulong *v;
10067             abi_ulong v1, v2, v3, v4, v5, v6;
10068             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10069                 return -TARGET_EFAULT;
10070             v1 = tswapal(v[0]);
10071             v2 = tswapal(v[1]);
10072             v3 = tswapal(v[2]);
10073             v4 = tswapal(v[3]);
10074             v5 = tswapal(v[4]);
10075             v6 = tswapal(v[5]);
10076             unlock_user(v, arg1, 0);
10077             ret = get_errno(target_mmap(v1, v2, v3,
10078                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10079                                         v5, v6));
10080         }
10081 #else
10082         /* mmap pointers are always untagged */
10083         ret = get_errno(target_mmap(arg1, arg2, arg3,
10084                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10085                                     arg5,
10086                                     arg6));
10087 #endif
10088         return ret;
10089 #endif
10090 #ifdef TARGET_NR_mmap2
10091     case TARGET_NR_mmap2:
10092 #ifndef MMAP_SHIFT
10093 #define MMAP_SHIFT 12
10094 #endif
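        /*
         * mmap2 passes the file offset in units of (1 << MMAP_SHIFT) bytes
         * (4096 unless the target overrides it), so convert it back to a
         * byte offset for target_mmap().
         */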
10095         ret = target_mmap(arg1, arg2, arg3,
10096                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10097                           arg5, arg6 << MMAP_SHIFT);
10098         return get_errno(ret);
10099 #endif
10100     case TARGET_NR_munmap:
10101         arg1 = cpu_untagged_addr(cpu, arg1);
10102         return get_errno(target_munmap(arg1, arg2));
10103     case TARGET_NR_mprotect:
10104         arg1 = cpu_untagged_addr(cpu, arg1);
10105         {
10106             TaskState *ts = cpu->opaque;
10107             /* Special hack to detect libc making the stack executable.  */
10108             if ((arg3 & PROT_GROWSDOWN)
10109                 && arg1 >= ts->info->stack_limit
10110                 && arg1 <= ts->info->start_stack) {
10111                 arg3 &= ~PROT_GROWSDOWN;
10112                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10113                 arg1 = ts->info->stack_limit;
10114             }
10115         }
10116         return get_errno(target_mprotect(arg1, arg2, arg3));
10117 #ifdef TARGET_NR_mremap
10118     case TARGET_NR_mremap:
10119         arg1 = cpu_untagged_addr(cpu, arg1);
10120         /* mremap new_addr (arg5) is always untagged */
10121         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10122 #endif
10123         /* ??? msync/mlock/munlock are broken for softmmu.  */
10124 #ifdef TARGET_NR_msync
10125     case TARGET_NR_msync:
10126         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10127 #endif
10128 #ifdef TARGET_NR_mlock
10129     case TARGET_NR_mlock:
10130         return get_errno(mlock(g2h(cpu, arg1), arg2));
10131 #endif
10132 #ifdef TARGET_NR_munlock
10133     case TARGET_NR_munlock:
10134         return get_errno(munlock(g2h(cpu, arg1), arg2));
10135 #endif
10136 #ifdef TARGET_NR_mlockall
10137     case TARGET_NR_mlockall:
10138         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10139 #endif
10140 #ifdef TARGET_NR_munlockall
10141     case TARGET_NR_munlockall:
10142         return get_errno(munlockall());
10143 #endif
10144 #ifdef TARGET_NR_truncate
10145     case TARGET_NR_truncate:
10146         if (!(p = lock_user_string(arg1)))
10147             return -TARGET_EFAULT;
10148         ret = get_errno(truncate(p, arg2));
10149         unlock_user(p, arg1, 0);
10150         return ret;
10151 #endif
10152 #ifdef TARGET_NR_ftruncate
10153     case TARGET_NR_ftruncate:
10154         return get_errno(ftruncate(arg1, arg2));
10155 #endif
10156     case TARGET_NR_fchmod:
10157         return get_errno(fchmod(arg1, arg2));
10158 #if defined(TARGET_NR_fchmodat)
10159     case TARGET_NR_fchmodat:
10160         if (!(p = lock_user_string(arg2)))
10161             return -TARGET_EFAULT;
10162         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10163         unlock_user(p, arg2, 0);
10164         return ret;
10165 #endif
10166     case TARGET_NR_getpriority:
10167         /* Note that negative values are valid for getpriority, so we must
10168            differentiate based on errno settings.  */
10169         errno = 0;
10170         ret = getpriority(arg1, arg2);
10171         if (ret == -1 && errno != 0) {
10172             return -host_to_target_errno(errno);
10173         }
10174 #ifdef TARGET_ALPHA
10175         /* Return value is the unbiased priority.  Signal no error.  */
10176         cpu_env->ir[IR_V0] = 0;
10177 #else
10178         /* Return value is a biased priority to avoid negative numbers.  */
10179         ret = 20 - ret;
10180 #endif
10181         return ret;
10182     case TARGET_NR_setpriority:
10183         return get_errno(setpriority(arg1, arg2, arg3));
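    /*
     * statfs/fstatfs and their 64-bit variants share the conversion of the
     * host struct statfs into the target layout via the convert_statfs and
     * convert_statfs64 labels below.
     */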
10184 #ifdef TARGET_NR_statfs
10185     case TARGET_NR_statfs:
10186         if (!(p = lock_user_string(arg1))) {
10187             return -TARGET_EFAULT;
10188         }
10189         ret = get_errno(statfs(path(p), &stfs));
10190         unlock_user(p, arg1, 0);
10191     convert_statfs:
10192         if (!is_error(ret)) {
10193             struct target_statfs *target_stfs;
10194 
10195             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10196                 return -TARGET_EFAULT;
10197             __put_user(stfs.f_type, &target_stfs->f_type);
10198             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10199             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10200             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10201             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10202             __put_user(stfs.f_files, &target_stfs->f_files);
10203             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10204             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10205             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10206             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10207             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10208 #ifdef _STATFS_F_FLAGS
10209             __put_user(stfs.f_flags, &target_stfs->f_flags);
10210 #else
10211             __put_user(0, &target_stfs->f_flags);
10212 #endif
10213             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10214             unlock_user_struct(target_stfs, arg2, 1);
10215         }
10216         return ret;
10217 #endif
10218 #ifdef TARGET_NR_fstatfs
10219     case TARGET_NR_fstatfs:
10220         ret = get_errno(fstatfs(arg1, &stfs));
10221         goto convert_statfs;
10222 #endif
10223 #ifdef TARGET_NR_statfs64
10224     case TARGET_NR_statfs64:
10225         if (!(p = lock_user_string(arg1))) {
10226             return -TARGET_EFAULT;
10227         }
10228         ret = get_errno(statfs(path(p), &stfs));
10229         unlock_user(p, arg1, 0);
10230     convert_statfs64:
10231         if (!is_error(ret)) {
10232             struct target_statfs64 *target_stfs;
10233 
10234             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10235                 return -TARGET_EFAULT;
10236             __put_user(stfs.f_type, &target_stfs->f_type);
10237             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10238             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10239             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10240             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10241             __put_user(stfs.f_files, &target_stfs->f_files);
10242             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10243             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10244             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10245             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10246             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10247 #ifdef _STATFS_F_FLAGS
10248             __put_user(stfs.f_flags, &target_stfs->f_flags);
10249 #else
10250             __put_user(0, &target_stfs->f_flags);
10251 #endif
10252             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10253             unlock_user_struct(target_stfs, arg3, 1);
10254         }
10255         return ret;
10256     case TARGET_NR_fstatfs64:
10257         ret = get_errno(fstatfs(arg1, &stfs));
10258         goto convert_statfs64;
10259 #endif
10260 #ifdef TARGET_NR_socketcall
10261     case TARGET_NR_socketcall:
10262         return do_socketcall(arg1, arg2);
10263 #endif
10264 #ifdef TARGET_NR_accept
10265     case TARGET_NR_accept:
10266         return do_accept4(arg1, arg2, arg3, 0);
10267 #endif
10268 #ifdef TARGET_NR_accept4
10269     case TARGET_NR_accept4:
10270         return do_accept4(arg1, arg2, arg3, arg4);
10271 #endif
10272 #ifdef TARGET_NR_bind
10273     case TARGET_NR_bind:
10274         return do_bind(arg1, arg2, arg3);
10275 #endif
10276 #ifdef TARGET_NR_connect
10277     case TARGET_NR_connect:
10278         return do_connect(arg1, arg2, arg3);
10279 #endif
10280 #ifdef TARGET_NR_getpeername
10281     case TARGET_NR_getpeername:
10282         return do_getpeername(arg1, arg2, arg3);
10283 #endif
10284 #ifdef TARGET_NR_getsockname
10285     case TARGET_NR_getsockname:
10286         return do_getsockname(arg1, arg2, arg3);
10287 #endif
10288 #ifdef TARGET_NR_getsockopt
10289     case TARGET_NR_getsockopt:
10290         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10291 #endif
10292 #ifdef TARGET_NR_listen
10293     case TARGET_NR_listen:
10294         return get_errno(listen(arg1, arg2));
10295 #endif
10296 #ifdef TARGET_NR_recv
10297     case TARGET_NR_recv:
10298         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10299 #endif
10300 #ifdef TARGET_NR_recvfrom
10301     case TARGET_NR_recvfrom:
10302         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10303 #endif
10304 #ifdef TARGET_NR_recvmsg
10305     case TARGET_NR_recvmsg:
10306         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10307 #endif
10308 #ifdef TARGET_NR_send
10309     case TARGET_NR_send:
10310         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10311 #endif
10312 #ifdef TARGET_NR_sendmsg
10313     case TARGET_NR_sendmsg:
10314         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10315 #endif
10316 #ifdef TARGET_NR_sendmmsg
10317     case TARGET_NR_sendmmsg:
10318         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10319 #endif
10320 #ifdef TARGET_NR_recvmmsg
10321     case TARGET_NR_recvmmsg:
10322         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10323 #endif
10324 #ifdef TARGET_NR_sendto
10325     case TARGET_NR_sendto:
10326         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10327 #endif
10328 #ifdef TARGET_NR_shutdown
10329     case TARGET_NR_shutdown:
10330         return get_errno(shutdown(arg1, arg2));
10331 #endif
10332 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10333     case TARGET_NR_getrandom:
10334         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10335         if (!p) {
10336             return -TARGET_EFAULT;
10337         }
10338         ret = get_errno(getrandom(p, arg2, arg3));
10339         unlock_user(p, arg1, ret);
10340         return ret;
10341 #endif
10342 #ifdef TARGET_NR_socket
10343     case TARGET_NR_socket:
10344         return do_socket(arg1, arg2, arg3);
10345 #endif
10346 #ifdef TARGET_NR_socketpair
10347     case TARGET_NR_socketpair:
10348         return do_socketpair(arg1, arg2, arg3, arg4);
10349 #endif
10350 #ifdef TARGET_NR_setsockopt
10351     case TARGET_NR_setsockopt:
10352         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10353 #endif
10354 #if defined(TARGET_NR_syslog)
10355     case TARGET_NR_syslog:
10356         {
10357             int len = arg3;
10358 
10359             switch (arg1) {
10360             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10361             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10362             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10363             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10364             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10365             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10366             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10367             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10368                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10369             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10370             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10371             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10372                 {
10373                     if (len < 0) {
10374                         return -TARGET_EINVAL;
10375                     }
10376                     if (len == 0) {
10377                         return 0;
10378                     }
10379                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10380                     if (!p) {
10381                         return -TARGET_EFAULT;
10382                     }
10383                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10384                     unlock_user(p, arg2, arg3);
10385                 }
10386                 return ret;
10387             default:
10388                 return -TARGET_EINVAL;
10389             }
10390         }
10391         break;
10392 #endif
10393     case TARGET_NR_setitimer:
10394         {
10395             struct itimerval value, ovalue, *pvalue;
10396 
10397             if (arg2) {
10398                 pvalue = &value;
10399                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10400                     || copy_from_user_timeval(&pvalue->it_value,
10401                                               arg2 + sizeof(struct target_timeval)))
10402                     return -TARGET_EFAULT;
10403             } else {
10404                 pvalue = NULL;
10405             }
10406             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10407             if (!is_error(ret) && arg3) {
10408                 if (copy_to_user_timeval(arg3,
10409                                          &ovalue.it_interval)
10410                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10411                                             &ovalue.it_value))
10412                     return -TARGET_EFAULT;
10413             }
10414         }
10415         return ret;
10416     case TARGET_NR_getitimer:
10417         {
10418             struct itimerval value;
10419 
10420             ret = get_errno(getitimer(arg1, &value));
10421             if (!is_error(ret) && arg2) {
10422                 if (copy_to_user_timeval(arg2,
10423                                          &value.it_interval)
10424                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10425                                             &value.it_value))
10426                     return -TARGET_EFAULT;
10427             }
10428         }
10429         return ret;
10430 #ifdef TARGET_NR_stat
10431     case TARGET_NR_stat:
10432         if (!(p = lock_user_string(arg1))) {
10433             return -TARGET_EFAULT;
10434         }
10435         ret = get_errno(stat(path(p), &st));
10436         unlock_user(p, arg1, 0);
10437         goto do_stat;
10438 #endif
10439 #ifdef TARGET_NR_lstat
10440     case TARGET_NR_lstat:
10441         if (!(p = lock_user_string(arg1))) {
10442             return -TARGET_EFAULT;
10443         }
10444         ret = get_errno(lstat(path(p), &st));
10445         unlock_user(p, arg1, 0);
10446         goto do_stat;
10447 #endif
10448 #ifdef TARGET_NR_fstat
10449     case TARGET_NR_fstat:
10450         {
10451             ret = get_errno(fstat(arg1, &st));
10452 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10453         do_stat:
10454 #endif
10455             if (!is_error(ret)) {
10456                 struct target_stat *target_st;
10457 
10458                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10459                     return -TARGET_EFAULT;
10460                 memset(target_st, 0, sizeof(*target_st));
10461                 __put_user(st.st_dev, &target_st->st_dev);
10462                 __put_user(st.st_ino, &target_st->st_ino);
10463                 __put_user(st.st_mode, &target_st->st_mode);
10464                 __put_user(st.st_uid, &target_st->st_uid);
10465                 __put_user(st.st_gid, &target_st->st_gid);
10466                 __put_user(st.st_nlink, &target_st->st_nlink);
10467                 __put_user(st.st_rdev, &target_st->st_rdev);
10468                 __put_user(st.st_size, &target_st->st_size);
10469                 __put_user(st.st_blksize, &target_st->st_blksize);
10470                 __put_user(st.st_blocks, &target_st->st_blocks);
10471                 __put_user(st.st_atime, &target_st->target_st_atime);
10472                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10473                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10474 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10475                 __put_user(st.st_atim.tv_nsec,
10476                            &target_st->target_st_atime_nsec);
10477                 __put_user(st.st_mtim.tv_nsec,
10478                            &target_st->target_st_mtime_nsec);
10479                 __put_user(st.st_ctim.tv_nsec,
10480                            &target_st->target_st_ctime_nsec);
10481 #endif
10482                 unlock_user_struct(target_st, arg2, 1);
10483             }
10484         }
10485         return ret;
10486 #endif
10487     case TARGET_NR_vhangup:
10488         return get_errno(vhangup());
10489 #ifdef TARGET_NR_syscall
10490     case TARGET_NR_syscall:
10491         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10492                           arg6, arg7, arg8, 0);
10493 #endif
10494 #if defined(TARGET_NR_wait4)
10495     case TARGET_NR_wait4:
10496         {
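            /*
             * The guest status word is written back only when a child was
             * actually reaped (ret != 0); a failed rusage conversion is
             * reported in place of the pid.
             */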
10497             int status;
10498             abi_long status_ptr = arg2;
10499             struct rusage rusage, *rusage_ptr;
10500             abi_ulong target_rusage = arg4;
10501             abi_long rusage_err;
10502             if (target_rusage)
10503                 rusage_ptr = &rusage;
10504             else
10505                 rusage_ptr = NULL;
10506             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10507             if (!is_error(ret)) {
10508                 if (status_ptr && ret) {
10509                     status = host_to_target_waitstatus(status);
10510                     if (put_user_s32(status, status_ptr))
10511                         return -TARGET_EFAULT;
10512                 }
10513                 if (target_rusage) {
10514                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10515                     if (rusage_err) {
10516                         ret = rusage_err;
10517                     }
10518                 }
10519             }
10520         }
10521         return ret;
10522 #endif
10523 #ifdef TARGET_NR_swapoff
10524     case TARGET_NR_swapoff:
10525         if (!(p = lock_user_string(arg1)))
10526             return -TARGET_EFAULT;
10527         ret = get_errno(swapoff(p));
10528         unlock_user(p, arg1, 0);
10529         return ret;
10530 #endif
10531     case TARGET_NR_sysinfo:
10532         {
10533             struct target_sysinfo *target_value;
10534             struct sysinfo value;
10535             ret = get_errno(sysinfo(&value));
10536             if (!is_error(ret) && arg1)
10537             {
10538                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10539                     return -TARGET_EFAULT;
10540                 __put_user(value.uptime, &target_value->uptime);
10541                 __put_user(value.loads[0], &target_value->loads[0]);
10542                 __put_user(value.loads[1], &target_value->loads[1]);
10543                 __put_user(value.loads[2], &target_value->loads[2]);
10544                 __put_user(value.totalram, &target_value->totalram);
10545                 __put_user(value.freeram, &target_value->freeram);
10546                 __put_user(value.sharedram, &target_value->sharedram);
10547                 __put_user(value.bufferram, &target_value->bufferram);
10548                 __put_user(value.totalswap, &target_value->totalswap);
10549                 __put_user(value.freeswap, &target_value->freeswap);
10550                 __put_user(value.procs, &target_value->procs);
10551                 __put_user(value.totalhigh, &target_value->totalhigh);
10552                 __put_user(value.freehigh, &target_value->freehigh);
10553                 __put_user(value.mem_unit, &target_value->mem_unit);
10554                 unlock_user_struct(target_value, arg1, 1);
10555             }
10556         }
10557         return ret;
10558 #ifdef TARGET_NR_ipc
10559     case TARGET_NR_ipc:
10560         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10561 #endif
10562 #ifdef TARGET_NR_semget
10563     case TARGET_NR_semget:
10564         return get_errno(semget(arg1, arg2, arg3));
10565 #endif
10566 #ifdef TARGET_NR_semop
10567     case TARGET_NR_semop:
10568         return do_semtimedop(arg1, arg2, arg3, 0, false);
10569 #endif
10570 #ifdef TARGET_NR_semtimedop
10571     case TARGET_NR_semtimedop:
10572         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10573 #endif
10574 #ifdef TARGET_NR_semtimedop_time64
10575     case TARGET_NR_semtimedop_time64:
10576         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10577 #endif
10578 #ifdef TARGET_NR_semctl
10579     case TARGET_NR_semctl:
10580         return do_semctl(arg1, arg2, arg3, arg4);
10581 #endif
10582 #ifdef TARGET_NR_msgctl
10583     case TARGET_NR_msgctl:
10584         return do_msgctl(arg1, arg2, arg3);
10585 #endif
10586 #ifdef TARGET_NR_msgget
10587     case TARGET_NR_msgget:
10588         return get_errno(msgget(arg1, arg2));
10589 #endif
10590 #ifdef TARGET_NR_msgrcv
10591     case TARGET_NR_msgrcv:
10592         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10593 #endif
10594 #ifdef TARGET_NR_msgsnd
10595     case TARGET_NR_msgsnd:
10596         return do_msgsnd(arg1, arg2, arg3, arg4);
10597 #endif
10598 #ifdef TARGET_NR_shmget
10599     case TARGET_NR_shmget:
10600         return get_errno(shmget(arg1, arg2, arg3));
10601 #endif
10602 #ifdef TARGET_NR_shmctl
10603     case TARGET_NR_shmctl:
10604         return do_shmctl(arg1, arg2, arg3);
10605 #endif
10606 #ifdef TARGET_NR_shmat
10607     case TARGET_NR_shmat:
10608         return do_shmat(cpu_env, arg1, arg2, arg3);
10609 #endif
10610 #ifdef TARGET_NR_shmdt
10611     case TARGET_NR_shmdt:
10612         return do_shmdt(arg1);
10613 #endif
10614     case TARGET_NR_fsync:
10615         return get_errno(fsync(arg1));
10616     case TARGET_NR_clone:
10617         /* Linux manages to have three different orderings for its
10618          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10619          * match the kernel's CONFIG_CLONE_* settings.
10620          * Microblaze is further special in that it uses a sixth
10621          * implicit argument to clone for the TLS pointer.
10622          */
10623 #if defined(TARGET_MICROBLAZE)
10624         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10625 #elif defined(TARGET_CLONE_BACKWARDS)
10626         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10627 #elif defined(TARGET_CLONE_BACKWARDS2)
10628         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10629 #else
10630         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10631 #endif
10632         return ret;
10633 #ifdef __NR_exit_group
10634         /* new thread calls */
10635     case TARGET_NR_exit_group:
10636         preexit_cleanup(cpu_env, arg1);
10637         return get_errno(exit_group(arg1));
10638 #endif
10639     case TARGET_NR_setdomainname:
10640         if (!(p = lock_user_string(arg1)))
10641             return -TARGET_EFAULT;
10642         ret = get_errno(setdomainname(p, arg2));
10643         unlock_user(p, arg1, 0);
10644         return ret;
10645     case TARGET_NR_uname:
10646         /* No need to transcode because we use the Linux syscall. */
10647         {
10648             struct new_utsname * buf;
10649 
10650             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10651                 return -TARGET_EFAULT;
10652             ret = get_errno(sys_uname(buf));
10653             if (!is_error(ret)) {
10654                 /* Overwrite the native machine name with whatever is being
10655                    emulated. */
10656                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10657                           sizeof(buf->machine));
10658                 /* Allow the user to override the reported release.  */
10659                 if (qemu_uname_release && *qemu_uname_release) {
10660                     g_strlcpy(buf->release, qemu_uname_release,
10661                               sizeof(buf->release));
10662                 }
10663             }
10664             unlock_user_struct(buf, arg1, 1);
10665         }
10666         return ret;
10667 #ifdef TARGET_I386
10668     case TARGET_NR_modify_ldt:
10669         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10670 #if !defined(TARGET_X86_64)
10671     case TARGET_NR_vm86:
10672         return do_vm86(cpu_env, arg1, arg2);
10673 #endif
10674 #endif
10675 #if defined(TARGET_NR_adjtimex)
10676     case TARGET_NR_adjtimex:
10677         {
10678             struct timex host_buf;
10679 
10680             if (target_to_host_timex(&host_buf, arg1) != 0) {
10681                 return -TARGET_EFAULT;
10682             }
10683             ret = get_errno(adjtimex(&host_buf));
10684             if (!is_error(ret)) {
10685                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10686                     return -TARGET_EFAULT;
10687                 }
10688             }
10689         }
10690         return ret;
10691 #endif
10692 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10693     case TARGET_NR_clock_adjtime:
10694         {
10695             struct timex htx, *phtx = &htx;
10696 
10697             if (target_to_host_timex(phtx, arg2) != 0) {
10698                 return -TARGET_EFAULT;
10699             }
10700             ret = get_errno(clock_adjtime(arg1, phtx));
10701             if (!is_error(ret) && phtx) {
10702                 if (host_to_target_timex(arg2, phtx) != 0) {
10703                     return -TARGET_EFAULT;
10704                 }
10705             }
10706         }
10707         return ret;
10708 #endif
10709 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10710     case TARGET_NR_clock_adjtime64:
10711         {
10712             struct timex htx;
10713 
10714             if (target_to_host_timex64(&htx, arg2) != 0) {
10715                 return -TARGET_EFAULT;
10716             }
10717             ret = get_errno(clock_adjtime(arg1, &htx));
10718             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10719                     return -TARGET_EFAULT;
10720             }
10721         }
10722         return ret;
10723 #endif
10724     case TARGET_NR_getpgid:
10725         return get_errno(getpgid(arg1));
10726     case TARGET_NR_fchdir:
10727         return get_errno(fchdir(arg1));
10728     case TARGET_NR_personality:
10729         return get_errno(personality(arg1));
10730 #ifdef TARGET_NR__llseek /* Not on alpha */
10731     case TARGET_NR__llseek:
10732         {
10733             int64_t res;
10734 #if !defined(__NR_llseek)
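                  /* No host _llseek (typical of 64-bit hosts): rebuild the
                   * 64-bit offset from its two 32-bit halves and use plain
                   * lseek() instead. */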
10735             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10736             if (res == -1) {
10737                 ret = get_errno(res);
10738             } else {
10739                 ret = 0;
10740             }
10741 #else
10742             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10743 #endif
10744             if ((ret == 0) && put_user_s64(res, arg4)) {
10745                 return -TARGET_EFAULT;
10746             }
10747         }
10748         return ret;
10749 #endif
10750 #ifdef TARGET_NR_getdents
10751     case TARGET_NR_getdents:
10752         return do_getdents(arg1, arg2, arg3);
10753 #endif /* TARGET_NR_getdents */
10754 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10755     case TARGET_NR_getdents64:
10756         return do_getdents64(arg1, arg2, arg3);
10757 #endif /* TARGET_NR_getdents64 */
10758 #if defined(TARGET_NR__newselect)
10759     case TARGET_NR__newselect:
10760         return do_select(arg1, arg2, arg3, arg4, arg5);
10761 #endif
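          /* poll, ppoll and ppoll_time64 share a helper; the two trailing
           * flags select ppoll semantics and a 64-bit target timespec. */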
10762 #ifdef TARGET_NR_poll
10763     case TARGET_NR_poll:
10764         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10765 #endif
10766 #ifdef TARGET_NR_ppoll
10767     case TARGET_NR_ppoll:
10768         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10769 #endif
10770 #ifdef TARGET_NR_ppoll_time64
10771     case TARGET_NR_ppoll_time64:
10772         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10773 #endif
10774     case TARGET_NR_flock:
10775         /* NOTE: the flock constant seems to be the same for every
10776            Linux platform */
10777         return get_errno(safe_flock(arg1, arg2));
10778     case TARGET_NR_readv:
10779         {
10780             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10781             if (vec != NULL) {
10782                 ret = get_errno(safe_readv(arg1, vec, arg3));
10783                 unlock_iovec(vec, arg2, arg3, 1);
10784             } else {
10785                 ret = -host_to_target_errno(errno);
10786             }
10787         }
10788         return ret;
10789     case TARGET_NR_writev:
10790         {
10791             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10792             if (vec != NULL) {
10793                 ret = get_errno(safe_writev(arg1, vec, arg3));
10794                 unlock_iovec(vec, arg2, arg3, 0);
10795             } else {
10796                 ret = -host_to_target_errno(errno);
10797             }
10798         }
10799         return ret;
10800 #if defined(TARGET_NR_preadv)
10801     case TARGET_NR_preadv:
10802         {
10803             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10804             if (vec != NULL) {
10805                 unsigned long low, high;
10806 
10807                 target_to_host_low_high(arg4, arg5, &low, &high);
10808                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10809                 unlock_iovec(vec, arg2, arg3, 1);
10810             } else {
10811                 ret = -host_to_target_errno(errno);
10812             }
10813         }
10814         return ret;
10815 #endif
10816 #if defined(TARGET_NR_pwritev)
10817     case TARGET_NR_pwritev:
10818         {
10819             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10820             if (vec != NULL) {
10821                 unsigned long low, high;
10822 
10823                 target_to_host_low_high(arg4, arg5, &low, &high);
10824                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10825                 unlock_iovec(vec, arg2, arg3, 0);
10826             } else {
10827                 ret = -host_to_target_errno(errno);
10828             }
10829         }
10830         return ret;
10831 #endif
10832     case TARGET_NR_getsid:
10833         return get_errno(getsid(arg1));
10834 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10835     case TARGET_NR_fdatasync:
10836         return get_errno(fdatasync(arg1));
10837 #endif
10838     case TARGET_NR_sched_getaffinity:
10839         {
10840             unsigned int mask_size;
10841             unsigned long *mask;
10842 
10843             /*
10844              * sched_getaffinity needs multiples of ulong, so need to take
10845              * care of mismatches between target ulong and host ulong sizes.
10846              */
10847             if (arg2 & (sizeof(abi_ulong) - 1)) {
10848                 return -TARGET_EINVAL;
10849             }
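                  /* Round the buffer size up to a whole number of host longs. */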
10850             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10851 
10852             mask = alloca(mask_size);
10853             memset(mask, 0, mask_size);
10854             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10855 
10856             if (!is_error(ret)) {
10857                 if (ret > arg2) {
10858                     /* More data returned than the caller's buffer will fit.
10859                      * This only happens if sizeof(abi_long) < sizeof(long)
10860                      * and the caller passed us a buffer holding an odd number
10861                      * of abi_longs. If the host kernel is actually using the
10862                      * extra 4 bytes then fail EINVAL; otherwise we can just
10863                      * ignore them and only copy the interesting part.
10864                      */
10865                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10866                     if (numcpus > arg2 * 8) {
10867                         return -TARGET_EINVAL;
10868                     }
10869                     ret = arg2;
10870                 }
10871 
10872                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10873                     return -TARGET_EFAULT;
10874                 }
10875             }
10876         }
10877         return ret;
10878     case TARGET_NR_sched_setaffinity:
10879         {
10880             unsigned int mask_size;
10881             unsigned long *mask;
10882 
10883             /*
10884              * sched_setaffinity needs multiples of ulong, so need to take
10885              * care of mismatches between target ulong and host ulong sizes.
10886              */
10887             if (arg2 & (sizeof(abi_ulong) - 1)) {
10888                 return -TARGET_EINVAL;
10889             }
10890             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10891             mask = alloca(mask_size);
10892 
10893             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10894             if (ret) {
10895                 return ret;
10896             }
10897 
10898             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10899         }
10900     case TARGET_NR_getcpu:
10901         {
10902             unsigned cpu, node;
10903             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10904                                        arg2 ? &node : NULL,
10905                                        NULL));
10906             if (is_error(ret)) {
10907                 return ret;
10908             }
10909             if (arg1 && put_user_u32(cpu, arg1)) {
10910                 return -TARGET_EFAULT;
10911             }
10912             if (arg2 && put_user_u32(node, arg2)) {
10913                 return -TARGET_EFAULT;
10914             }
10915         }
10916         return ret;
10917     case TARGET_NR_sched_setparam:
10918         {
10919             struct target_sched_param *target_schp;
10920             struct sched_param schp;
10921 
10922             if (arg2 == 0) {
10923                 return -TARGET_EINVAL;
10924             }
10925             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10926                 return -TARGET_EFAULT;
10927             }
10928             schp.sched_priority = tswap32(target_schp->sched_priority);
10929             unlock_user_struct(target_schp, arg2, 0);
10930             return get_errno(sys_sched_setparam(arg1, &schp));
10931         }
10932     case TARGET_NR_sched_getparam:
10933         {
10934             struct target_sched_param *target_schp;
10935             struct sched_param schp;
10936 
10937             if (arg2 == 0) {
10938                 return -TARGET_EINVAL;
10939             }
10940             ret = get_errno(sys_sched_getparam(arg1, &schp));
10941             if (!is_error(ret)) {
10942                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10943                     return -TARGET_EFAULT;
10944                 }
10945                 target_schp->sched_priority = tswap32(schp.sched_priority);
10946                 unlock_user_struct(target_schp, arg2, 1);
10947             }
10948         }
10949         return ret;
10950     case TARGET_NR_sched_setscheduler:
10951         {
10952             struct target_sched_param *target_schp;
10953             struct sched_param schp;
10954             if (arg3 == 0) {
10955                 return -TARGET_EINVAL;
10956             }
10957             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10958                 return -TARGET_EFAULT;
10959             }
10960             schp.sched_priority = tswap32(target_schp->sched_priority);
10961             unlock_user_struct(target_schp, arg3, 0);
10962             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10963         }
10964     case TARGET_NR_sched_getscheduler:
10965         return get_errno(sys_sched_getscheduler(arg1));
10966     case TARGET_NR_sched_getattr:
10967         {
10968             struct target_sched_attr *target_scha;
10969             struct sched_attr scha;
10970             if (arg2 == 0) {
10971                 return -TARGET_EINVAL;
10972             }
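                  /* Clamp the size so the kernel never writes past our buffer. */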
10973             if (arg3 > sizeof(scha)) {
10974                 arg3 = sizeof(scha);
10975             }
10976             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10977             if (!is_error(ret)) {
10978                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10979                 if (!target_scha) {
10980                     return -TARGET_EFAULT;
10981                 }
10982                 target_scha->size = tswap32(scha.size);
10983                 target_scha->sched_policy = tswap32(scha.sched_policy);
10984                 target_scha->sched_flags = tswap64(scha.sched_flags);
10985                 target_scha->sched_nice = tswap32(scha.sched_nice);
10986                 target_scha->sched_priority = tswap32(scha.sched_priority);
10987                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10988                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10989                 target_scha->sched_period = tswap64(scha.sched_period);
10990                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10991                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10992                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10993                 }
10994                 unlock_user(target_scha, arg2, arg3);
10995             }
10996             return ret;
10997         }
10998     case TARGET_NR_sched_setattr:
10999         {
11000             struct target_sched_attr *target_scha;
11001             struct sched_attr scha;
11002             uint32_t size;
11003             int zeroed;
11004             if (arg2 == 0) {
11005                 return -TARGET_EINVAL;
11006             }
11007             if (get_user_u32(size, arg2)) {
11008                 return -TARGET_EFAULT;
11009             }
11010             if (!size) {
11011                 size = offsetof(struct target_sched_attr, sched_util_min);
11012             }
11013             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11014                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11015                     return -TARGET_EFAULT;
11016                 }
11017                 return -TARGET_E2BIG;
11018             }
11019 
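                  /* As the kernel does, require any bytes beyond the structure
                   * we know about to be zero, otherwise fail with E2BIG. */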
11020             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11021             if (zeroed < 0) {
11022                 return zeroed;
11023             } else if (zeroed == 0) {
11024                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11025                     return -TARGET_EFAULT;
11026                 }
11027                 return -TARGET_E2BIG;
11028             }
11029             if (size > sizeof(struct target_sched_attr)) {
11030                 size = sizeof(struct target_sched_attr);
11031             }
11032 
11033             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11034             if (!target_scha) {
11035                 return -TARGET_EFAULT;
11036             }
11037             scha.size = size;
11038             scha.sched_policy = tswap32(target_scha->sched_policy);
11039             scha.sched_flags = tswap64(target_scha->sched_flags);
11040             scha.sched_nice = tswap32(target_scha->sched_nice);
11041             scha.sched_priority = tswap32(target_scha->sched_priority);
11042             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11043             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11044             scha.sched_period = tswap64(target_scha->sched_period);
11045             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11046                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11047                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11048             }
11049             unlock_user(target_scha, arg2, 0);
11050             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11051         }
11052     case TARGET_NR_sched_yield:
11053         return get_errno(sched_yield());
11054     case TARGET_NR_sched_get_priority_max:
11055         return get_errno(sched_get_priority_max(arg1));
11056     case TARGET_NR_sched_get_priority_min:
11057         return get_errno(sched_get_priority_min(arg1));
11058 #ifdef TARGET_NR_sched_rr_get_interval
11059     case TARGET_NR_sched_rr_get_interval:
11060         {
11061             struct timespec ts;
11062             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11063             if (!is_error(ret)) {
11064                 ret = host_to_target_timespec(arg2, &ts);
11065             }
11066         }
11067         return ret;
11068 #endif
11069 #ifdef TARGET_NR_sched_rr_get_interval_time64
11070     case TARGET_NR_sched_rr_get_interval_time64:
11071         {
11072             struct timespec ts;
11073             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11074             if (!is_error(ret)) {
11075                 ret = host_to_target_timespec64(arg2, &ts);
11076             }
11077         }
11078         return ret;
11079 #endif
11080 #if defined(TARGET_NR_nanosleep)
11081     case TARGET_NR_nanosleep:
11082         {
11083             struct timespec req, rem;
11084             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
11085             ret = get_errno(safe_nanosleep(&req, &rem));
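                  /* On failure (typically EINTR) pass back whatever unslept
                   * time the host reported, if the caller asked for it. */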
11086             if (is_error(ret) && arg2) {
11087                 host_to_target_timespec(arg2, &rem);
11088             }
11089         }
11090         return ret;
11091 #endif
11092     case TARGET_NR_prctl:
11093         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11095 #ifdef TARGET_NR_arch_prctl
11096     case TARGET_NR_arch_prctl:
11097         return do_arch_prctl(cpu_env, arg1, arg2);
11098 #endif
11099 #ifdef TARGET_NR_pread64
11100     case TARGET_NR_pread64:
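              /* ABIs that pass 64-bit values in even register pairs insert a
               * pad argument, so the offset halves arrive one slot later. */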
11101         if (regpairs_aligned(cpu_env, num)) {
11102             arg4 = arg5;
11103             arg5 = arg6;
11104         }
11105         if (arg2 == 0 && arg3 == 0) {
11106             /* Special-case NULL buffer and zero length, which should succeed */
11107             p = 0;
11108         } else {
11109             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11110             if (!p) {
11111                 return -TARGET_EFAULT;
11112             }
11113         }
11114         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11115         unlock_user(p, arg2, ret);
11116         return ret;
11117     case TARGET_NR_pwrite64:
11118         if (regpairs_aligned(cpu_env, num)) {
11119             arg4 = arg5;
11120             arg5 = arg6;
11121         }
11122         if (arg2 == 0 && arg3 == 0) {
11123             /* Special-case NULL buffer and zero length, which should succeed */
11124             p = 0;
11125         } else {
11126             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11127             if (!p) {
11128                 return -TARGET_EFAULT;
11129             }
11130         }
11131         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11132         unlock_user(p, arg2, 0);
11133         return ret;
11134 #endif
11135     case TARGET_NR_getcwd:
11136         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11137             return -TARGET_EFAULT;
11138         ret = get_errno(sys_getcwd1(p, arg2));
11139         unlock_user(p, arg1, ret);
11140         return ret;
11141     case TARGET_NR_capget:
11142     case TARGET_NR_capset:
11143     {
11144         struct target_user_cap_header *target_header;
11145         struct target_user_cap_data *target_data = NULL;
11146         struct __user_cap_header_struct header;
11147         struct __user_cap_data_struct data[2];
11148         struct __user_cap_data_struct *dataptr = NULL;
11149         int i, target_datalen;
11150         int data_items = 1;
11151 
11152         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11153             return -TARGET_EFAULT;
11154         }
11155         header.version = tswap32(target_header->version);
11156         header.pid = tswap32(target_header->pid);
11157 
11158         if (header.version != _LINUX_CAPABILITY_VERSION) {
11159             /* Version 2 and up takes pointer to two user_data structs */
11160             data_items = 2;
11161         }
11162 
11163         target_datalen = sizeof(*target_data) * data_items;
11164 
11165         if (arg2) {
11166             if (num == TARGET_NR_capget) {
11167                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11168             } else {
11169                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11170             }
11171             if (!target_data) {
11172                 unlock_user_struct(target_header, arg1, 0);
11173                 return -TARGET_EFAULT;
11174             }
11175 
11176             if (num == TARGET_NR_capset) {
11177                 for (i = 0; i < data_items; i++) {
11178                     data[i].effective = tswap32(target_data[i].effective);
11179                     data[i].permitted = tswap32(target_data[i].permitted);
11180                     data[i].inheritable = tswap32(target_data[i].inheritable);
11181                 }
11182             }
11183 
11184             dataptr = data;
11185         }
11186 
11187         if (num == TARGET_NR_capget) {
11188             ret = get_errno(capget(&header, dataptr));
11189         } else {
11190             ret = get_errno(capset(&header, dataptr));
11191         }
11192 
11193         /* The kernel always updates version for both capget and capset */
11194         target_header->version = tswap32(header.version);
11195         unlock_user_struct(target_header, arg1, 1);
11196 
11197         if (arg2) {
11198             if (num == TARGET_NR_capget) {
11199                 for (i = 0; i < data_items; i++) {
11200                     target_data[i].effective = tswap32(data[i].effective);
11201                     target_data[i].permitted = tswap32(data[i].permitted);
11202                     target_data[i].inheritable = tswap32(data[i].inheritable);
11203                 }
11204                 unlock_user(target_data, arg2, target_datalen);
11205             } else {
11206                 unlock_user(target_data, arg2, 0);
11207             }
11208         }
11209         return ret;
11210     }
11211     case TARGET_NR_sigaltstack:
11212         return do_sigaltstack(arg1, arg2, cpu_env);
11213 
11214 #ifdef CONFIG_SENDFILE
11215 #ifdef TARGET_NR_sendfile
11216     case TARGET_NR_sendfile:
11217     {
11218         off_t *offp = NULL;
11219         off_t off;
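              /* If an offset pointer was supplied, copy it in, let the host
               * sendfile() advance it, and write the new value back below. */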
11220         if (arg3) {
11221             ret = get_user_sal(off, arg3);
11222             if (is_error(ret)) {
11223                 return ret;
11224             }
11225             offp = &off;
11226         }
11227         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11228         if (!is_error(ret) && arg3) {
11229             abi_long ret2 = put_user_sal(off, arg3);
11230             if (is_error(ret2)) {
11231                 ret = ret2;
11232             }
11233         }
11234         return ret;
11235     }
11236 #endif
11237 #ifdef TARGET_NR_sendfile64
11238     case TARGET_NR_sendfile64:
11239     {
11240         off_t *offp = NULL;
11241         off_t off;
11242         if (arg3) {
11243             ret = get_user_s64(off, arg3);
11244             if (is_error(ret)) {
11245                 return ret;
11246             }
11247             offp = &off;
11248         }
11249         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11250         if (!is_error(ret) && arg3) {
11251             abi_long ret2 = put_user_s64(off, arg3);
11252             if (is_error(ret2)) {
11253                 ret = ret2;
11254             }
11255         }
11256         return ret;
11257     }
11258 #endif
11259 #endif
11260 #ifdef TARGET_NR_vfork
11261     case TARGET_NR_vfork:
11262         return get_errno(do_fork(cpu_env,
11263                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11264                          0, 0, 0, 0));
11265 #endif
11266 #ifdef TARGET_NR_ugetrlimit
11267     case TARGET_NR_ugetrlimit:
11268     {
11269         struct rlimit rlim;
11270         int resource = target_to_host_resource(arg1);
11271         ret = get_errno(getrlimit(resource, &rlim));
11272         if (!is_error(ret)) {
11273             struct target_rlimit *target_rlim;
11274             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11275                 return -TARGET_EFAULT;
11276             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11277             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11278             unlock_user_struct(target_rlim, arg2, 1);
11279         }
11280         return ret;
11281     }
11282 #endif
11283 #ifdef TARGET_NR_truncate64
11284     case TARGET_NR_truncate64:
11285         if (!(p = lock_user_string(arg1)))
11286             return -TARGET_EFAULT;
11287         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11288         unlock_user(p, arg1, 0);
11289         return ret;
11290 #endif
11291 #ifdef TARGET_NR_ftruncate64
11292     case TARGET_NR_ftruncate64:
11293         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11294 #endif
11295 #ifdef TARGET_NR_stat64
11296     case TARGET_NR_stat64:
11297         if (!(p = lock_user_string(arg1))) {
11298             return -TARGET_EFAULT;
11299         }
11300         ret = get_errno(stat(path(p), &st));
11301         unlock_user(p, arg1, 0);
11302         if (!is_error(ret))
11303             ret = host_to_target_stat64(cpu_env, arg2, &st);
11304         return ret;
11305 #endif
11306 #ifdef TARGET_NR_lstat64
11307     case TARGET_NR_lstat64:
11308         if (!(p = lock_user_string(arg1))) {
11309             return -TARGET_EFAULT;
11310         }
11311         ret = get_errno(lstat(path(p), &st));
11312         unlock_user(p, arg1, 0);
11313         if (!is_error(ret))
11314             ret = host_to_target_stat64(cpu_env, arg2, &st);
11315         return ret;
11316 #endif
11317 #ifdef TARGET_NR_fstat64
11318     case TARGET_NR_fstat64:
11319         ret = get_errno(fstat(arg1, &st));
11320         if (!is_error(ret))
11321             ret = host_to_target_stat64(cpu_env, arg2, &st);
11322         return ret;
11323 #endif
11324 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11325 #ifdef TARGET_NR_fstatat64
11326     case TARGET_NR_fstatat64:
11327 #endif
11328 #ifdef TARGET_NR_newfstatat
11329     case TARGET_NR_newfstatat:
11330 #endif
11331         if (!(p = lock_user_string(arg2))) {
11332             return -TARGET_EFAULT;
11333         }
11334         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11335         unlock_user(p, arg2, 0);
11336         if (!is_error(ret))
11337             ret = host_to_target_stat64(cpu_env, arg3, &st);
11338         return ret;
11339 #endif
11340 #if defined(TARGET_NR_statx)
11341     case TARGET_NR_statx:
11342         {
11343             struct target_statx *target_stx;
11344             int dirfd = arg1;
11345             int flags = arg3;
11346 
11347             p = lock_user_string(arg2);
11348             if (p == NULL) {
11349                 return -TARGET_EFAULT;
11350             }
11351 #if defined(__NR_statx)
11352             {
11353                 /*
11354                  * It is assumed that struct statx is architecture independent.
11355                  */
11356                 struct target_statx host_stx;
11357                 int mask = arg4;
11358 
11359                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11360                 if (!is_error(ret)) {
11361                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11362                         unlock_user(p, arg2, 0);
11363                         return -TARGET_EFAULT;
11364                     }
11365                 }
11366 
11367                 if (ret != -TARGET_ENOSYS) {
11368                     unlock_user(p, arg2, 0);
11369                     return ret;
11370                 }
11371             }
11372 #endif
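                  /* Host statx is unavailable or unimplemented: fall back to
                   * fstatat() and fill in the statx fields we can recover. */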
11373             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11374             unlock_user(p, arg2, 0);
11375 
11376             if (!is_error(ret)) {
11377                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11378                     return -TARGET_EFAULT;
11379                 }
11380                 memset(target_stx, 0, sizeof(*target_stx));
11381                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11382                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11383                 __put_user(st.st_ino, &target_stx->stx_ino);
11384                 __put_user(st.st_mode, &target_stx->stx_mode);
11385                 __put_user(st.st_uid, &target_stx->stx_uid);
11386                 __put_user(st.st_gid, &target_stx->stx_gid);
11387                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11388                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11389                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11390                 __put_user(st.st_size, &target_stx->stx_size);
11391                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11392                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11393                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11394                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11395                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11396                 unlock_user_struct(target_stx, arg5, 1);
11397             }
11398         }
11399         return ret;
11400 #endif
11401 #ifdef TARGET_NR_lchown
11402     case TARGET_NR_lchown:
11403         if (!(p = lock_user_string(arg1)))
11404             return -TARGET_EFAULT;
11405         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11406         unlock_user(p, arg1, 0);
11407         return ret;
11408 #endif
11409 #ifdef TARGET_NR_getuid
11410     case TARGET_NR_getuid:
11411         return get_errno(high2lowuid(getuid()));
11412 #endif
11413 #ifdef TARGET_NR_getgid
11414     case TARGET_NR_getgid:
11415         return get_errno(high2lowgid(getgid()));
11416 #endif
11417 #ifdef TARGET_NR_geteuid
11418     case TARGET_NR_geteuid:
11419         return get_errno(high2lowuid(geteuid()));
11420 #endif
11421 #ifdef TARGET_NR_getegid
11422     case TARGET_NR_getegid:
11423         return get_errno(high2lowgid(getegid()));
11424 #endif
11425     case TARGET_NR_setreuid:
11426         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11427     case TARGET_NR_setregid:
11428         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11429     case TARGET_NR_getgroups:
11430         {
11431             int gidsetsize = arg1;
11432             target_id *target_grouplist;
11433             gid_t *grouplist;
11434             int i;
11435 
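                  /* A zero gidsetsize only queries the number of groups. */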
11436             grouplist = alloca(gidsetsize * sizeof(gid_t));
11437             ret = get_errno(getgroups(gidsetsize, grouplist));
11438             if (gidsetsize == 0)
11439                 return ret;
11440             if (!is_error(ret)) {
11441                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11442                 if (!target_grouplist)
11443                     return -TARGET_EFAULT;
11444                 for (i = 0; i < ret; i++)
11445                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11446                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11447             }
11448         }
11449         return ret;
11450     case TARGET_NR_setgroups:
11451         {
11452             int gidsetsize = arg1;
11453             target_id *target_grouplist;
11454             gid_t *grouplist = NULL;
11455             int i;
11456             if (gidsetsize) {
11457                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11458                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11459                 if (!target_grouplist) {
11460                     return -TARGET_EFAULT;
11461                 }
11462                 for (i = 0; i < gidsetsize; i++) {
11463                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11464                 }
11465                 unlock_user(target_grouplist, arg2, 0);
11466             }
11467             return get_errno(setgroups(gidsetsize, grouplist));
11468         }
11469     case TARGET_NR_fchown:
11470         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11471 #if defined(TARGET_NR_fchownat)
11472     case TARGET_NR_fchownat:
11473         if (!(p = lock_user_string(arg2)))
11474             return -TARGET_EFAULT;
11475         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11476                                  low2highgid(arg4), arg5));
11477         unlock_user(p, arg2, 0);
11478         return ret;
11479 #endif
11480 #ifdef TARGET_NR_setresuid
11481     case TARGET_NR_setresuid:
11482         return get_errno(sys_setresuid(low2highuid(arg1),
11483                                        low2highuid(arg2),
11484                                        low2highuid(arg3)));
11485 #endif
11486 #ifdef TARGET_NR_getresuid
11487     case TARGET_NR_getresuid:
11488         {
11489             uid_t ruid, euid, suid;
11490             ret = get_errno(getresuid(&ruid, &euid, &suid));
11491             if (!is_error(ret)) {
11492                 if (put_user_id(high2lowuid(ruid), arg1)
11493                     || put_user_id(high2lowuid(euid), arg2)
11494                     || put_user_id(high2lowuid(suid), arg3))
11495                     return -TARGET_EFAULT;
11496             }
11497         }
11498         return ret;
11499 #endif
11500 #ifdef TARGET_NR_setresgid
11501     case TARGET_NR_setresgid:
11502         return get_errno(sys_setresgid(low2highgid(arg1),
11503                                        low2highgid(arg2),
11504                                        low2highgid(arg3)));
11505 #endif
11506 #ifdef TARGET_NR_getresgid
11507     case TARGET_NR_getresgid:
11508         {
11509             gid_t rgid, egid, sgid;
11510             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11511             if (!is_error(ret)) {
11512                 if (put_user_id(high2lowgid(rgid), arg1)
11513                     || put_user_id(high2lowgid(egid), arg2)
11514                     || put_user_id(high2lowgid(sgid), arg3))
11515                     return -TARGET_EFAULT;
11516             }
11517         }
11518         return ret;
11519 #endif
11520 #ifdef TARGET_NR_chown
11521     case TARGET_NR_chown:
11522         if (!(p = lock_user_string(arg1)))
11523             return -TARGET_EFAULT;
11524         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11525         unlock_user(p, arg1, 0);
11526         return ret;
11527 #endif
11528     case TARGET_NR_setuid:
11529         return get_errno(sys_setuid(low2highuid(arg1)));
11530     case TARGET_NR_setgid:
11531         return get_errno(sys_setgid(low2highgid(arg1)));
11532     case TARGET_NR_setfsuid:
11533         return get_errno(setfsuid(arg1));
11534     case TARGET_NR_setfsgid:
11535         return get_errno(setfsgid(arg1));
11536 
11537 #ifdef TARGET_NR_lchown32
11538     case TARGET_NR_lchown32:
11539         if (!(p = lock_user_string(arg1)))
11540             return -TARGET_EFAULT;
11541         ret = get_errno(lchown(p, arg2, arg3));
11542         unlock_user(p, arg1, 0);
11543         return ret;
11544 #endif
11545 #ifdef TARGET_NR_getuid32
11546     case TARGET_NR_getuid32:
11547         return get_errno(getuid());
11548 #endif
11549 
11550 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11551    /* Alpha specific */
11552     case TARGET_NR_getxuid:
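              /* getxuid returns the real uid in v0 and the effective uid in a4. */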
11553         {
11554             uid_t euid;
11555             euid = geteuid();
11556             cpu_env->ir[IR_A4] = euid;
11557         }
11558         return get_errno(getuid());
11559 #endif
11560 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11561    /* Alpha specific */
11562     case TARGET_NR_getxgid:
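              /* Likewise: real gid in v0, effective gid in a4. */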
11563         {
11564             gid_t egid;
11565             egid = getegid();
11566             cpu_env->ir[IR_A4] = egid;
11567         }
11568         return get_errno(getgid());
11569 #endif
11570 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11571     /* Alpha specific */
11572     case TARGET_NR_osf_getsysinfo:
11573         ret = -TARGET_EOPNOTSUPP;
11574         switch (arg1) {
11575           case TARGET_GSI_IEEE_FP_CONTROL:
11576             {
11577                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11578                 uint64_t swcr = cpu_env->swcr;
11579 
11580                 swcr &= ~SWCR_STATUS_MASK;
11581                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11582 
11583                 if (put_user_u64 (swcr, arg2))
11584                     return -TARGET_EFAULT;
11585                 ret = 0;
11586             }
11587             break;
11588 
11589           /* case GSI_IEEE_STATE_AT_SIGNAL:
11590              -- Not implemented in linux kernel.
11591              case GSI_UACPROC:
11592              -- Retrieves current unaligned access state; not much used.
11593              case GSI_PROC_TYPE:
11594              -- Retrieves implver information; surely not used.
11595              case GSI_GET_HWRPB:
11596              -- Grabs a copy of the HWRPB; surely not used.
11597           */
11598         }
11599         return ret;
11600 #endif
11601 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11602     /* Alpha specific */
11603     case TARGET_NR_osf_setsysinfo:
11604         ret = -TARGET_EOPNOTSUPP;
11605         switch (arg1) {
11606           case TARGET_SSI_IEEE_FP_CONTROL:
11607             {
11608                 uint64_t swcr, fpcr;
11609 
11610                 if (get_user_u64 (swcr, arg2)) {
11611                     return -TARGET_EFAULT;
11612                 }
11613 
11614                 /*
11615                  * The kernel calls swcr_update_status to update the
11616                  * status bits from the fpcr at every point that it
11617                  * could be queried.  Therefore, we store the status
11618                  * bits only in FPCR.
11619                  */
11620                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11621 
11622                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11623                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11624                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11625                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11626                 ret = 0;
11627             }
11628             break;
11629 
11630           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11631             {
11632                 uint64_t exc, fpcr, fex;
11633 
11634                 if (get_user_u64(exc, arg2)) {
11635                     return -TARGET_EFAULT;
11636                 }
11637                 exc &= SWCR_STATUS_MASK;
11638                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11639 
11640                 /* Old exceptions are not signaled.  */
11641                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11642                 fex = exc & ~fex;
11643                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11644                 fex &= (cpu_env)->swcr;
11645 
11646                 /* Update the hardware fpcr.  */
11647                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11648                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11649 
11650                 if (fex) {
11651                     int si_code = TARGET_FPE_FLTUNK;
11652                     target_siginfo_t info;
11653 
11654                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11655                         si_code = TARGET_FPE_FLTUND;
11656                     }
11657                     if (fex & SWCR_TRAP_ENABLE_INE) {
11658                         si_code = TARGET_FPE_FLTRES;
11659                     }
11660                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11661                         si_code = TARGET_FPE_FLTUND;
11662                     }
11663                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11664                         si_code = TARGET_FPE_FLTOVF;
11665                     }
11666                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11667                         si_code = TARGET_FPE_FLTDIV;
11668                     }
11669                     if (fex & SWCR_TRAP_ENABLE_INV) {
11670                         si_code = TARGET_FPE_FLTINV;
11671                     }
11672 
11673                     info.si_signo = SIGFPE;
11674                     info.si_errno = 0;
11675                     info.si_code = si_code;
11676                     info._sifields._sigfault._addr = (cpu_env)->pc;
11677                     queue_signal(cpu_env, info.si_signo,
11678                                  QEMU_SI_FAULT, &info);
11679                 }
11680                 ret = 0;
11681             }
11682             break;
11683 
11684           /* case SSI_NVPAIRS:
11685              -- Used with SSIN_UACPROC to enable unaligned accesses.
11686              case SSI_IEEE_STATE_AT_SIGNAL:
11687              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11688              -- Not implemented in linux kernel
11689           */
11690         }
11691         return ret;
11692 #endif
11693 #ifdef TARGET_NR_osf_sigprocmask
11694     /* Alpha specific.  */
11695     case TARGET_NR_osf_sigprocmask:
11696         {
11697             abi_ulong mask;
11698             int how;
11699             sigset_t set, oldset;
11700 
11701             switch(arg1) {
11702             case TARGET_SIG_BLOCK:
11703                 how = SIG_BLOCK;
11704                 break;
11705             case TARGET_SIG_UNBLOCK:
11706                 how = SIG_UNBLOCK;
11707                 break;
11708             case TARGET_SIG_SETMASK:
11709                 how = SIG_SETMASK;
11710                 break;
11711             default:
11712                 return -TARGET_EINVAL;
11713             }
11714             mask = arg2;
11715             target_to_host_old_sigset(&set, &mask);
11716             ret = do_sigprocmask(how, &set, &oldset);
11717             if (!ret) {
11718                 host_to_target_old_sigset(&mask, &oldset);
11719                 return -TARGET_EFAULT;
11720             }
11721         }
11722         return ret;
11723 #endif
11724 
11725 #ifdef TARGET_NR_getgid32
11726     case TARGET_NR_getgid32:
11727         return get_errno(getgid());
11728 #endif
11729 #ifdef TARGET_NR_geteuid32
11730     case TARGET_NR_geteuid32:
11731         return get_errno(geteuid());
11732 #endif
11733 #ifdef TARGET_NR_getegid32
11734     case TARGET_NR_getegid32:
11735         return get_errno(getegid());
11736 #endif
11737 #ifdef TARGET_NR_setreuid32
11738     case TARGET_NR_setreuid32:
11739         return get_errno(setreuid(arg1, arg2));
11740 #endif
11741 #ifdef TARGET_NR_setregid32
11742     case TARGET_NR_setregid32:
11743         return get_errno(setregid(arg1, arg2));
11744 #endif
11745 #ifdef TARGET_NR_getgroups32
11746     case TARGET_NR_getgroups32:
11747         {
11748             int gidsetsize = arg1;
11749             uint32_t *target_grouplist;
11750             gid_t *grouplist;
11751             int i;
11752 
11753             grouplist = alloca(gidsetsize * sizeof(gid_t));
11754             ret = get_errno(getgroups(gidsetsize, grouplist));
11755             if (gidsetsize == 0)
11756                 return ret;
11757             if (!is_error(ret)) {
11758                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11759                 if (!target_grouplist) {
11760                     return -TARGET_EFAULT;
11761                 }
11762                 for (i = 0; i < ret; i++)
11763                     target_grouplist[i] = tswap32(grouplist[i]);
11764                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11765             }
11766         }
11767         return ret;
11768 #endif
11769 #ifdef TARGET_NR_setgroups32
11770     case TARGET_NR_setgroups32:
11771         {
11772             int gidsetsize = arg1;
11773             uint32_t *target_grouplist;
11774             gid_t *grouplist;
11775             int i;
11776 
11777             grouplist = alloca(gidsetsize * sizeof(gid_t));
11778             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11779             if (!target_grouplist) {
11780                 return -TARGET_EFAULT;
11781             }
11782             for (i = 0; i < gidsetsize; i++)
11783                 grouplist[i] = tswap32(target_grouplist[i]);
11784             unlock_user(target_grouplist, arg2, 0);
11785             return get_errno(setgroups(gidsetsize, grouplist));
11786         }
11787 #endif
11788 #ifdef TARGET_NR_fchown32
11789     case TARGET_NR_fchown32:
11790         return get_errno(fchown(arg1, arg2, arg3));
11791 #endif
11792 #ifdef TARGET_NR_setresuid32
11793     case TARGET_NR_setresuid32:
11794         return get_errno(sys_setresuid(arg1, arg2, arg3));
11795 #endif
11796 #ifdef TARGET_NR_getresuid32
11797     case TARGET_NR_getresuid32:
11798         {
11799             uid_t ruid, euid, suid;
11800             ret = get_errno(getresuid(&ruid, &euid, &suid));
11801             if (!is_error(ret)) {
11802                 if (put_user_u32(ruid, arg1)
11803                     || put_user_u32(euid, arg2)
11804                     || put_user_u32(suid, arg3))
11805                     return -TARGET_EFAULT;
11806             }
11807         }
11808         return ret;
11809 #endif
11810 #ifdef TARGET_NR_setresgid32
11811     case TARGET_NR_setresgid32:
11812         return get_errno(sys_setresgid(arg1, arg2, arg3));
11813 #endif
11814 #ifdef TARGET_NR_getresgid32
11815     case TARGET_NR_getresgid32:
11816         {
11817             gid_t rgid, egid, sgid;
11818             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11819             if (!is_error(ret)) {
11820                 if (put_user_u32(rgid, arg1)
11821                     || put_user_u32(egid, arg2)
11822                     || put_user_u32(sgid, arg3))
11823                     return -TARGET_EFAULT;
11824             }
11825         }
11826         return ret;
11827 #endif
11828 #ifdef TARGET_NR_chown32
11829     case TARGET_NR_chown32:
11830         if (!(p = lock_user_string(arg1)))
11831             return -TARGET_EFAULT;
11832         ret = get_errno(chown(p, arg2, arg3));
11833         unlock_user(p, arg1, 0);
11834         return ret;
11835 #endif
11836 #ifdef TARGET_NR_setuid32
11837     case TARGET_NR_setuid32:
11838         return get_errno(sys_setuid(arg1));
11839 #endif
11840 #ifdef TARGET_NR_setgid32
11841     case TARGET_NR_setgid32:
11842         return get_errno(sys_setgid(arg1));
11843 #endif
11844 #ifdef TARGET_NR_setfsuid32
11845     case TARGET_NR_setfsuid32:
11846         return get_errno(setfsuid(arg1));
11847 #endif
11848 #ifdef TARGET_NR_setfsgid32
11849     case TARGET_NR_setfsgid32:
11850         return get_errno(setfsgid(arg1));
11851 #endif
11852 #ifdef TARGET_NR_mincore
11853     case TARGET_NR_mincore:
11854         {
11855             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11856             if (!a) {
11857                 return -TARGET_ENOMEM;
11858             }
11859             p = lock_user_string(arg3);
11860             if (!p) {
11861                 ret = -TARGET_EFAULT;
11862             } else {
11863                 ret = get_errno(mincore(a, arg2, p));
11864                 unlock_user(p, arg3, ret);
11865             }
11866             unlock_user(a, arg1, 0);
11867         }
11868         return ret;
11869 #endif
11870 #ifdef TARGET_NR_arm_fadvise64_64
11871     case TARGET_NR_arm_fadvise64_64:
11872         /* arm_fadvise64_64 looks like fadvise64_64 but
11873          * with different argument order: fd, advice, offset, len
11874          * rather than the usual fd, offset, len, advice.
11875          * Note that offset and len are both 64-bit so appear as
11876          * pairs of 32-bit registers.
11877          */
11878         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11879                             target_offset64(arg5, arg6), arg2);
11880         return -host_to_target_errno(ret);
11881 #endif
11882 
11883 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11884 
11885 #ifdef TARGET_NR_fadvise64_64
11886     case TARGET_NR_fadvise64_64:
11887 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11888         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11889         ret = arg2;
11890         arg2 = arg3;
11891         arg3 = arg4;
11892         arg4 = arg5;
11893         arg5 = arg6;
11894         arg6 = ret;
11895 #else
11896         /* 6 args: fd, offset (high, low), len (high, low), advice */
11897         if (regpairs_aligned(cpu_env, num)) {
11898             /* offset is in (3,4), len in (5,6) and advice in 7 */
11899             arg2 = arg3;
11900             arg3 = arg4;
11901             arg4 = arg5;
11902             arg5 = arg6;
11903             arg6 = arg7;
11904         }
11905 #endif
11906         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11907                             target_offset64(arg4, arg5), arg6);
11908         return -host_to_target_errno(ret);
11909 #endif
11910 
11911 #ifdef TARGET_NR_fadvise64
11912     case TARGET_NR_fadvise64:
11913         /* 5 args: fd, offset (high, low), len, advice */
11914         if (regpairs_aligned(cpu_env, num)) {
11915             /* offset is in (3,4), len in 5 and advice in 6 */
11916             arg2 = arg3;
11917             arg3 = arg4;
11918             arg4 = arg5;
11919             arg5 = arg6;
11920         }
11921         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11922         return -host_to_target_errno(ret);
11923 #endif
11924 
11925 #else /* not a 32-bit ABI */
11926 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11927 #ifdef TARGET_NR_fadvise64_64
11928     case TARGET_NR_fadvise64_64:
11929 #endif
11930 #ifdef TARGET_NR_fadvise64
11931     case TARGET_NR_fadvise64:
11932 #endif
11933 #ifdef TARGET_S390X
11934         switch (arg4) {
11935         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11936         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11937         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11938         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11939         default: break;
11940         }
11941 #endif
11942         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11943 #endif
11944 #endif /* end of 64-bit ABI fadvise handling */
11945 
11946 #ifdef TARGET_NR_madvise
11947     case TARGET_NR_madvise:
11948         return target_madvise(arg1, arg2, arg3);
11949 #endif
11950 #ifdef TARGET_NR_fcntl64
11951     case TARGET_NR_fcntl64:
11952     {
11953         int cmd;
11954         struct flock64 fl;
11955         from_flock64_fn *copyfrom = copy_from_user_flock64;
11956         to_flock64_fn *copyto = copy_to_user_flock64;
11957 
11958 #ifdef TARGET_ARM
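              /* The ARM OABI lays out struct flock64 differently (no 64-bit
               * alignment), so it needs its own copy helpers. */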
11959         if (!cpu_env->eabi) {
11960             copyfrom = copy_from_user_oabi_flock64;
11961             copyto = copy_to_user_oabi_flock64;
11962         }
11963 #endif
11964 
11965         cmd = target_to_host_fcntl_cmd(arg2);
11966         if (cmd == -TARGET_EINVAL) {
11967             return cmd;
11968         }
11969 
11970         switch(arg2) {
11971         case TARGET_F_GETLK64:
11972             ret = copyfrom(&fl, arg3);
11973             if (ret) {
11974                 break;
11975             }
11976             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11977             if (ret == 0) {
11978                 ret = copyto(arg3, &fl);
11979             }
11980             break;
11981 
11982         case TARGET_F_SETLK64:
11983         case TARGET_F_SETLKW64:
11984             ret = copyfrom(&fl, arg3);
11985             if (ret) {
11986                 break;
11987             }
11988             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11989             break;
11990         default:
11991             ret = do_fcntl(arg1, arg2, arg3);
11992             break;
11993         }
11994         return ret;
11995     }
11996 #endif
11997 #ifdef TARGET_NR_cacheflush
11998     case TARGET_NR_cacheflush:
11999         /* self-modifying code is handled automatically, so nothing needed */
12000         return 0;
12001 #endif
12002 #ifdef TARGET_NR_getpagesize
12003     case TARGET_NR_getpagesize:
12004         return TARGET_PAGE_SIZE;
12005 #endif
12006     case TARGET_NR_gettid:
12007         return get_errno(sys_gettid());
12008 #ifdef TARGET_NR_readahead
12009     case TARGET_NR_readahead:
12010 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
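              /* 32-bit ABIs pass the 64-bit offset as a register pair, shifted
               * up by one when the ABI aligns such pairs; reassemble it here. */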
12011         if (regpairs_aligned(cpu_env, num)) {
12012             arg2 = arg3;
12013             arg3 = arg4;
12014             arg4 = arg5;
12015         }
12016         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12017 #else
12018         ret = get_errno(readahead(arg1, arg2, arg3));
12019 #endif
12020         return ret;
12021 #endif
12022 #ifdef CONFIG_ATTR
12023 #ifdef TARGET_NR_setxattr
12024     case TARGET_NR_listxattr:
12025     case TARGET_NR_llistxattr:
12026     {
12027         void *p, *b = 0;
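              /* A zero list buffer is allowed: the caller only wants the size. */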
12028         if (arg2) {
12029             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12030             if (!b) {
12031                 return -TARGET_EFAULT;
12032             }
12033         }
12034         p = lock_user_string(arg1);
12035         if (p) {
12036             if (num == TARGET_NR_listxattr) {
12037                 ret = get_errno(listxattr(p, b, arg3));
12038             } else {
12039                 ret = get_errno(llistxattr(p, b, arg3));
12040             }
12041         } else {
12042             ret = -TARGET_EFAULT;
12043         }
12044         unlock_user(p, arg1, 0);
12045         unlock_user(b, arg2, arg3);
12046         return ret;
12047     }
12048     case TARGET_NR_flistxattr:
12049     {
12050         void *b = 0;
12051         if (arg2) {
12052             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12053             if (!b) {
12054                 return -TARGET_EFAULT;
12055             }
12056         }
12057         ret = get_errno(flistxattr(arg1, b, arg3));
12058         unlock_user(b, arg2, arg3);
12059         return ret;
12060     }
12061     case TARGET_NR_setxattr:
12062     case TARGET_NR_lsetxattr:
12063         {
12064             void *p, *n, *v = 0;
12065             if (arg3) {
12066                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12067                 if (!v) {
12068                     return -TARGET_EFAULT;
12069                 }
12070             }
12071             p = lock_user_string(arg1);
12072             n = lock_user_string(arg2);
12073             if (p && n) {
12074                 if (num == TARGET_NR_setxattr) {
12075                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12076                 } else {
12077                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12078                 }
12079             } else {
12080                 ret = -TARGET_EFAULT;
12081             }
12082             unlock_user(p, arg1, 0);
12083             unlock_user(n, arg2, 0);
12084             unlock_user(v, arg3, 0);
12085         }
12086         return ret;
12087     case TARGET_NR_fsetxattr:
12088         {
12089             void *n, *v = 0;
12090             if (arg3) {
12091                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12092                 if (!v) {
12093                     return -TARGET_EFAULT;
12094                 }
12095             }
12096             n = lock_user_string(arg2);
12097             if (n) {
12098                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12099             } else {
12100                 ret = -TARGET_EFAULT;
12101             }
12102             unlock_user(n, arg2, 0);
12103             unlock_user(v, arg3, 0);
12104         }
12105         return ret;
12106     case TARGET_NR_getxattr:
12107     case TARGET_NR_lgetxattr:
12108         {
12109             void *p, *n, *v = 0;
12110             if (arg3) {
12111                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12112                 if (!v) {
12113                     return -TARGET_EFAULT;
12114                 }
12115             }
12116             p = lock_user_string(arg1);
12117             n = lock_user_string(arg2);
12118             if (p && n) {
12119                 if (num == TARGET_NR_getxattr) {
12120                     ret = get_errno(getxattr(p, n, v, arg4));
12121                 } else {
12122                     ret = get_errno(lgetxattr(p, n, v, arg4));
12123                 }
12124             } else {
12125                 ret = -TARGET_EFAULT;
12126             }
12127             unlock_user(p, arg1, 0);
12128             unlock_user(n, arg2, 0);
12129             unlock_user(v, arg3, arg4);
12130         }
12131         return ret;
12132     case TARGET_NR_fgetxattr:
12133         {
12134             void *n, *v = 0;
12135             if (arg3) {
12136                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12137                 if (!v) {
12138                     return -TARGET_EFAULT;
12139                 }
12140             }
12141             n = lock_user_string(arg2);
12142             if (n) {
12143                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12144             } else {
12145                 ret = -TARGET_EFAULT;
12146             }
12147             unlock_user(n, arg2, 0);
12148             unlock_user(v, arg3, arg4);
12149         }
12150         return ret;
12151     case TARGET_NR_removexattr:
12152     case TARGET_NR_lremovexattr:
12153         {
12154             void *p, *n;
12155             p = lock_user_string(arg1);
12156             n = lock_user_string(arg2);
12157             if (p && n) {
12158                 if (num == TARGET_NR_removexattr) {
12159                     ret = get_errno(removexattr(p, n));
12160                 } else {
12161                     ret = get_errno(lremovexattr(p, n));
12162                 }
12163             } else {
12164                 ret = -TARGET_EFAULT;
12165             }
12166             unlock_user(p, arg1, 0);
12167             unlock_user(n, arg2, 0);
12168         }
12169         return ret;
12170     case TARGET_NR_fremovexattr:
12171         {
12172             void *n;
12173             n = lock_user_string(arg2);
12174             if (n) {
12175                 ret = get_errno(fremovexattr(arg1, n));
12176             } else {
12177                 ret = -TARGET_EFAULT;
12178             }
12179             unlock_user(n, arg2, 0);
12180         }
12181         return ret;
12182 #endif
12183 #endif /* CONFIG_ATTR */
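    /*
     * set_thread_area/get_thread_area have no generic host counterpart in
     * this path, so each target keeps the thread pointer in its own emulated
     * CPU state below: CP0_UserLocal on MIPS, the PR_PID register on CRIS,
     * a GDT entry (do_set_thread_area) on 32-bit x86, and TaskState::tp_value
     * on m68k.  Other targets report -TARGET_ENOSYS.
     */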
12184 #ifdef TARGET_NR_set_thread_area
12185     case TARGET_NR_set_thread_area:
12186 #if defined(TARGET_MIPS)
12187       cpu_env->active_tc.CP0_UserLocal = arg1;
12188       return 0;
12189 #elif defined(TARGET_CRIS)
12190       if (arg1 & 0xff) {
12191           ret = -TARGET_EINVAL;
12192       } else {
12193           cpu_env->pregs[PR_PID] = arg1;
12194           ret = 0;
12195       }
12196       return ret;
12197 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12198       return do_set_thread_area(cpu_env, arg1);
12199 #elif defined(TARGET_M68K)
12200       {
12201           TaskState *ts = cpu->opaque;
12202           ts->tp_value = arg1;
12203           return 0;
12204       }
12205 #else
12206       return -TARGET_ENOSYS;
12207 #endif
12208 #endif
12209 #ifdef TARGET_NR_get_thread_area
12210     case TARGET_NR_get_thread_area:
12211 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12212         return do_get_thread_area(cpu_env, arg1);
12213 #elif defined(TARGET_M68K)
12214         {
12215             TaskState *ts = cpu->opaque;
12216             return ts->tp_value;
12217         }
12218 #else
12219         return -TARGET_ENOSYS;
12220 #endif
12221 #endif
12222 #ifdef TARGET_NR_getdomainname
12223     case TARGET_NR_getdomainname:
12224         return -TARGET_ENOSYS;
12225 #endif
12226 
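    /*
     * The clock_* cases below share one pattern: convert the guest timespec
     * to the host layout with target_to_host_timespec*(), run the host
     * syscall, and convert results back with host_to_target_timespec*().
     * The *_time64 variants differ only in using the 64-bit guest layout
     * that 32-bit guests may pass, so a guest doing e.g.
     *
     *     struct timespec ts;
     *     clock_gettime(CLOCK_MONOTONIC, &ts);
     *
     * may reach either TARGET_NR_clock_gettime or TARGET_NR_clock_gettime64
     * depending on its libc.
     */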
12227 #ifdef TARGET_NR_clock_settime
12228     case TARGET_NR_clock_settime:
12229     {
12230         struct timespec ts;
12231 
12232         ret = target_to_host_timespec(&ts, arg2);
12233         if (!is_error(ret)) {
12234             ret = get_errno(clock_settime(arg1, &ts));
12235         }
12236         return ret;
12237     }
12238 #endif
12239 #ifdef TARGET_NR_clock_settime64
12240     case TARGET_NR_clock_settime64:
12241     {
12242         struct timespec ts;
12243 
12244         ret = target_to_host_timespec64(&ts, arg2);
12245         if (!is_error(ret)) {
12246             ret = get_errno(clock_settime(arg1, &ts));
12247         }
12248         return ret;
12249     }
12250 #endif
12251 #ifdef TARGET_NR_clock_gettime
12252     case TARGET_NR_clock_gettime:
12253     {
12254         struct timespec ts;
12255         ret = get_errno(clock_gettime(arg1, &ts));
12256         if (!is_error(ret)) {
12257             ret = host_to_target_timespec(arg2, &ts);
12258         }
12259         return ret;
12260     }
12261 #endif
12262 #ifdef TARGET_NR_clock_gettime64
12263     case TARGET_NR_clock_gettime64:
12264     {
12265         struct timespec ts;
12266         ret = get_errno(clock_gettime(arg1, &ts));
12267         if (!is_error(ret)) {
12268             ret = host_to_target_timespec64(arg2, &ts);
12269         }
12270         return ret;
12271     }
12272 #endif
12273 #ifdef TARGET_NR_clock_getres
12274     case TARGET_NR_clock_getres:
12275     {
12276         struct timespec ts;
12277         ret = get_errno(clock_getres(arg1, &ts));
12278         if (!is_error(ret)) {
12279             host_to_target_timespec(arg2, &ts);
12280         }
12281         return ret;
12282     }
12283 #endif
12284 #ifdef TARGET_NR_clock_getres_time64
12285     case TARGET_NR_clock_getres_time64:
12286     {
12287         struct timespec ts;
12288         ret = get_errno(clock_getres(arg1, &ts));
12289         if (!is_error(ret)) {
12290             host_to_target_timespec64(arg2, &ts);
12291         }
12292         return ret;
12293     }
12294 #endif
12295 #ifdef TARGET_NR_clock_nanosleep
12296     case TARGET_NR_clock_nanosleep:
12297     {
12298         struct timespec ts;
12299         if (target_to_host_timespec(&ts, arg3)) {
12300             return -TARGET_EFAULT;
12301         }
12302         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12303                                              &ts, arg4 ? &ts : NULL));
12304         /*
12305          * If the call is interrupted by a signal handler, it fails with
12306          * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is not
12307          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12308          */
12309         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12310             host_to_target_timespec(arg4, &ts)) {
12311               return -TARGET_EFAULT;
12312         }
12313 
12314         return ret;
12315     }
12316 #endif
12317 #ifdef TARGET_NR_clock_nanosleep_time64
12318     case TARGET_NR_clock_nanosleep_time64:
12319     {
12320         struct timespec ts;
12321 
12322         if (target_to_host_timespec64(&ts, arg3)) {
12323             return -TARGET_EFAULT;
12324         }
12325 
12326         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12327                                              &ts, arg4 ? &ts : NULL));
12328 
12329         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12330             host_to_target_timespec64(arg4, &ts)) {
12331             return -TARGET_EFAULT;
12332         }
12333         return ret;
12334     }
12335 #endif
12336 
12337 #if defined(TARGET_NR_set_tid_address)
12338     case TARGET_NR_set_tid_address:
12339     {
12340         TaskState *ts = cpu->opaque;
12341         ts->child_tidptr = arg1;
12342         /* Do not call the host set_tid_address() syscall; just return the TID. */
12343         return get_errno(sys_gettid());
12344     }
12345 #endif
12346 
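    /*
     * tkill/tgkill only need the guest signal number translated to the host
     * numbering (target_to_host_signal) before invoking the host syscall
     * through the safe_ wrappers.
     */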
12347     case TARGET_NR_tkill:
12348         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12349 
12350     case TARGET_NR_tgkill:
12351         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12352                          target_to_host_signal(arg3)));
12353 
12354 #ifdef TARGET_NR_set_robust_list
12355     case TARGET_NR_set_robust_list:
12356     case TARGET_NR_get_robust_list:
12357         /* The ABI for supporting robust futexes has userspace pass
12358          * the kernel a pointer to a linked list which is updated by
12359          * userspace after the syscall; the list is walked by the kernel
12360          * when the thread exits. Since the linked list in QEMU guest
12361          * memory isn't a valid linked list for the host and we have
12362          * no way to reliably intercept the thread-death event, we can't
12363          * support these. Silently return ENOSYS so that guest userspace
12364          * falls back to a non-robust futex implementation (which should
12365          * be OK except in the corner case of the guest crashing while
12366          * holding a mutex that is shared with another process via
12367          * shared memory).
12368          */
12369         return -TARGET_ENOSYS;
12370 #endif
12371 
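    /*
     * utimensat: arg3 may be NULL (meaning "set both timestamps to now"),
     * otherwise it points to an array of two guest timespecs which are
     * converted individually; arg2 (the pathname) may also be NULL, in which
     * case the timestamps are applied to the fd in arg1.
     */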
12372 #if defined(TARGET_NR_utimensat)
12373     case TARGET_NR_utimensat:
12374         {
12375             struct timespec *tsp, ts[2];
12376             if (!arg3) {
12377                 tsp = NULL;
12378             } else {
12379                 if (target_to_host_timespec(ts, arg3)) {
12380                     return -TARGET_EFAULT;
12381                 }
12382                 if (target_to_host_timespec(ts + 1, arg3 +
12383                                             sizeof(struct target_timespec))) {
12384                     return -TARGET_EFAULT;
12385                 }
12386                 tsp = ts;
12387             }
12388             if (!arg2) {
12389                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12390             } else {
12391                 if (!(p = lock_user_string(arg2))) {
12392                     return -TARGET_EFAULT;
12393                 }
12394                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12395                 unlock_user(p, arg2, 0);
12396             }
12397         }
12398         return ret;
12399 #endif
12400 #ifdef TARGET_NR_utimensat_time64
12401     case TARGET_NR_utimensat_time64:
12402         {
12403             struct timespec *tsp, ts[2];
12404             if (!arg3) {
12405                 tsp = NULL;
12406             } else {
12407                 if (target_to_host_timespec64(ts, arg3)) {
12408                     return -TARGET_EFAULT;
12409                 }
12410                 if (target_to_host_timespec64(ts + 1, arg3 +
12411                                      sizeof(struct target__kernel_timespec))) {
12412                     return -TARGET_EFAULT;
12413                 }
12414                 tsp = ts;
12415             }
12416             if (!arg2) {
12417                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12418             } else {
12419                 p = lock_user_string(arg2);
12420                 if (!p) {
12421                     return -TARGET_EFAULT;
12422                 }
12423                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12424                 unlock_user(p, arg2, 0);
12425             }
12426         }
12427         return ret;
12428 #endif
12429 #ifdef TARGET_NR_futex
12430     case TARGET_NR_futex:
12431         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12432 #endif
12433 #ifdef TARGET_NR_futex_time64
12434     case TARGET_NR_futex_time64:
12435         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12436 #endif
12437 #ifdef CONFIG_INOTIFY
12438 #if defined(TARGET_NR_inotify_init)
12439     case TARGET_NR_inotify_init:
12440         ret = get_errno(inotify_init());
12441         if (ret >= 0) {
12442             fd_trans_register(ret, &target_inotify_trans);
12443         }
12444         return ret;
12445 #endif
12446 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12447     case TARGET_NR_inotify_init1:
12448         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12449                                           fcntl_flags_tbl)));
12450         if (ret >= 0) {
12451             fd_trans_register(ret, &target_inotify_trans);
12452         }
12453         return ret;
12454 #endif
12455 #if defined(TARGET_NR_inotify_add_watch)
12456     case TARGET_NR_inotify_add_watch:
12457         p = lock_user_string(arg2);
12458         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12459         unlock_user(p, arg2, 0);
12460         return ret;
12461 #endif
12462 #if defined(TARGET_NR_inotify_rm_watch)
12463     case TARGET_NR_inotify_rm_watch:
12464         return get_errno(inotify_rm_watch(arg1, arg2));
12465 #endif
12466 #endif
12467 
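    /*
     * POSIX message queues: open flags are translated with
     * target_to_host_bitmask(fcntl_flags_tbl) and the optional mq_attr is
     * copied in/out with copy_from_user_mq_attr()/copy_to_user_mq_attr();
     * the timed send/receive variants convert the timeout like the other
     * time64-aware syscalls above.
     */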
12468 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12469     case TARGET_NR_mq_open:
12470         {
12471             struct mq_attr posix_mq_attr;
12472             struct mq_attr *pposix_mq_attr;
12473             int host_flags;
12474 
12475             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12476             pposix_mq_attr = NULL;
12477             if (arg4) {
12478                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12479                     return -TARGET_EFAULT;
12480                 }
12481                 pposix_mq_attr = &posix_mq_attr;
12482             }
12483             p = lock_user_string(arg1 - 1);
12484             if (!p) {
12485                 return -TARGET_EFAULT;
12486             }
12487             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12488             unlock_user(p, arg1, 0);
12489         }
12490         return ret;
12491 
12492     case TARGET_NR_mq_unlink:
12493         p = lock_user_string(arg1 - 1);
12494         if (!p) {
12495             return -TARGET_EFAULT;
12496         }
12497         ret = get_errno(mq_unlink(p));
12498         unlock_user(p, arg1, 0);
12499         return ret;
12500 
12501 #ifdef TARGET_NR_mq_timedsend
12502     case TARGET_NR_mq_timedsend:
12503         {
12504             struct timespec ts;
12505 
12506             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12507             if (arg5 != 0) {
12508                 if (target_to_host_timespec(&ts, arg5)) {
12509                     return -TARGET_EFAULT;
12510                 }
12511                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12512                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12513                     return -TARGET_EFAULT;
12514                 }
12515             } else {
12516                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12517             }
12518             unlock_user(p, arg2, arg3);
12519         }
12520         return ret;
12521 #endif
12522 #ifdef TARGET_NR_mq_timedsend_time64
12523     case TARGET_NR_mq_timedsend_time64:
12524         {
12525             struct timespec ts;
12526 
12527             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12528             if (arg5 != 0) {
12529                 if (target_to_host_timespec64(&ts, arg5)) {
12530                     return -TARGET_EFAULT;
12531                 }
12532                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12533                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12534                     return -TARGET_EFAULT;
12535                 }
12536             } else {
12537                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12538             }
12539             unlock_user(p, arg2, arg3);
12540         }
12541         return ret;
12542 #endif
12543 
12544 #ifdef TARGET_NR_mq_timedreceive
12545     case TARGET_NR_mq_timedreceive:
12546         {
12547             struct timespec ts;
12548             unsigned int prio;
12549 
12550             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12551             if (arg5 != 0) {
12552                 if (target_to_host_timespec(&ts, arg5)) {
12553                     return -TARGET_EFAULT;
12554                 }
12555                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12556                                                      &prio, &ts));
12557                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12558                     return -TARGET_EFAULT;
12559                 }
12560             } else {
12561                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12562                                                      &prio, NULL));
12563             }
12564             unlock_user(p, arg2, arg3);
12565             if (arg4 != 0)
12566                 put_user_u32(prio, arg4);
12567         }
12568         return ret;
12569 #endif
12570 #ifdef TARGET_NR_mq_timedreceive_time64
12571     case TARGET_NR_mq_timedreceive_time64:
12572         {
12573             struct timespec ts;
12574             unsigned int prio;
12575 
12576             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12577             if (arg5 != 0) {
12578                 if (target_to_host_timespec64(&ts, arg5)) {
12579                     return -TARGET_EFAULT;
12580                 }
12581                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12582                                                      &prio, &ts));
12583                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12584                     return -TARGET_EFAULT;
12585                 }
12586             } else {
12587                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12588                                                      &prio, NULL));
12589             }
12590             unlock_user(p, arg2, arg3);
12591             if (arg4 != 0) {
12592                 put_user_u32(prio, arg4);
12593             }
12594         }
12595         return ret;
12596 #endif
12597 
12598     /* Not implemented for now... */
12599 /*     case TARGET_NR_mq_notify: */
12600 /*         break; */
12601 
12602     case TARGET_NR_mq_getsetattr:
12603         {
12604             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12605             ret = 0;
12606             if (arg2 != 0) {
12607                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12608                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12609                                            &posix_mq_attr_out));
12610             } else if (arg3 != 0) {
12611                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12612             }
12613             if (ret == 0 && arg3 != 0) {
12614                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12615             }
12616         }
12617         return ret;
12618 #endif
12619 
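    /*
     * tee/splice/vmsplice: the optional 64-bit offsets are read from and
     * written back to guest memory with get_user_u64()/put_user_u64(), and
     * vmsplice's iovec array is mapped with lock_iovec() before the host
     * call.
     */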
12620 #ifdef CONFIG_SPLICE
12621 #ifdef TARGET_NR_tee
12622     case TARGET_NR_tee:
12623         {
12624             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12625         }
12626         return ret;
12627 #endif
12628 #ifdef TARGET_NR_splice
12629     case TARGET_NR_splice:
12630         {
12631             loff_t loff_in, loff_out;
12632             loff_t *ploff_in = NULL, *ploff_out = NULL;
12633             if (arg2) {
12634                 if (get_user_u64(loff_in, arg2)) {
12635                     return -TARGET_EFAULT;
12636                 }
12637                 ploff_in = &loff_in;
12638             }
12639             if (arg4) {
12640                 if (get_user_u64(loff_out, arg4)) {
12641                     return -TARGET_EFAULT;
12642                 }
12643                 ploff_out = &loff_out;
12644             }
12645             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12646             if (arg2) {
12647                 if (put_user_u64(loff_in, arg2)) {
12648                     return -TARGET_EFAULT;
12649                 }
12650             }
12651             if (arg4) {
12652                 if (put_user_u64(loff_out, arg4)) {
12653                     return -TARGET_EFAULT;
12654                 }
12655             }
12656         }
12657         return ret;
12658 #endif
12659 #ifdef TARGET_NR_vmsplice
12660     case TARGET_NR_vmsplice:
12661         {
12662             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12663             if (vec != NULL) {
12664                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12665                 unlock_iovec(vec, arg2, arg3, 0);
12666             } else {
12667                 ret = -host_to_target_errno(errno);
12668             }
12669         }
12670         return ret;
12671 #endif
12672 #endif /* CONFIG_SPLICE */
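    /*
     * eventfd/eventfd2: the resulting host fd is registered with
     * fd_trans_register(&target_eventfd_trans), presumably so later reads
     * and writes of the 8-byte counter can be translated for the guest;
     * eventfd2 translates TARGET_O_NONBLOCK/TARGET_O_CLOEXEC by hand since
     * the target flag values need not match the host's.
     */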
12673 #ifdef CONFIG_EVENTFD
12674 #if defined(TARGET_NR_eventfd)
12675     case TARGET_NR_eventfd:
12676         ret = get_errno(eventfd(arg1, 0));
12677         if (ret >= 0) {
12678             fd_trans_register(ret, &target_eventfd_trans);
12679         }
12680         return ret;
12681 #endif
12682 #if defined(TARGET_NR_eventfd2)
12683     case TARGET_NR_eventfd2:
12684     {
12685         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12686         if (arg2 & TARGET_O_NONBLOCK) {
12687             host_flags |= O_NONBLOCK;
12688         }
12689         if (arg2 & TARGET_O_CLOEXEC) {
12690             host_flags |= O_CLOEXEC;
12691         }
12692         ret = get_errno(eventfd(arg1, host_flags));
12693         if (ret >= 0) {
12694             fd_trans_register(ret, &target_eventfd_trans);
12695         }
12696         return ret;
12697     }
12698 #endif
12699 #endif /* CONFIG_EVENTFD  */
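    /*
     * On 32-bit ABIs (other than MIPS N32) the 64-bit offset and length
     * arguments of fallocate and sync_file_range arrive split across two
     * registers and are reassembled with target_offset64().  MIPS shifts
     * the sync_file_range arguments along by one (arg3..arg7 below), and
     * ARM's sync_file_range2 variant passes the flags in arg2.
     */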
12700 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12701     case TARGET_NR_fallocate:
12702 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12703         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12704                                   target_offset64(arg5, arg6)));
12705 #else
12706         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12707 #endif
12708         return ret;
12709 #endif
12710 #if defined(CONFIG_SYNC_FILE_RANGE)
12711 #if defined(TARGET_NR_sync_file_range)
12712     case TARGET_NR_sync_file_range:
12713 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12714 #if defined(TARGET_MIPS)
12715         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12716                                         target_offset64(arg5, arg6), arg7));
12717 #else
12718         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12719                                         target_offset64(arg4, arg5), arg6));
12720 #endif /* !TARGET_MIPS */
12721 #else
12722         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12723 #endif
12724         return ret;
12725 #endif
12726 #if defined(TARGET_NR_sync_file_range2) || \
12727     defined(TARGET_NR_arm_sync_file_range)
12728 #if defined(TARGET_NR_sync_file_range2)
12729     case TARGET_NR_sync_file_range2:
12730 #endif
12731 #if defined(TARGET_NR_arm_sync_file_range)
12732     case TARGET_NR_arm_sync_file_range:
12733 #endif
12734         /* This is like sync_file_range but the arguments are reordered */
12735 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12736         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12737                                         target_offset64(arg5, arg6), arg2));
12738 #else
12739         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12740 #endif
12741         return ret;
12742 #endif
12743 #endif
12744 #if defined(TARGET_NR_signalfd4)
12745     case TARGET_NR_signalfd4:
12746         return do_signalfd4(arg1, arg2, arg4);
12747 #endif
12748 #if defined(TARGET_NR_signalfd)
12749     case TARGET_NR_signalfd:
12750         return do_signalfd4(arg1, arg2, 0);
12751 #endif
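    /*
     * epoll: struct epoll_event is converted field by field (tswap32 for
     * events, tswap64 for the opaque data union), the result count bounds
     * the copy back to guest memory, and epoll_pwait additionally installs
     * the guest signal mask around the wait via process_sigsuspend_mask().
     */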
12752 #if defined(CONFIG_EPOLL)
12753 #if defined(TARGET_NR_epoll_create)
12754     case TARGET_NR_epoll_create:
12755         return get_errno(epoll_create(arg1));
12756 #endif
12757 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12758     case TARGET_NR_epoll_create1:
12759         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12760 #endif
12761 #if defined(TARGET_NR_epoll_ctl)
12762     case TARGET_NR_epoll_ctl:
12763     {
12764         struct epoll_event ep;
12765         struct epoll_event *epp = 0;
12766         if (arg4) {
12767             if (arg2 != EPOLL_CTL_DEL) {
12768                 struct target_epoll_event *target_ep;
12769                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12770                     return -TARGET_EFAULT;
12771                 }
12772                 ep.events = tswap32(target_ep->events);
12773                 /*
12774                  * The epoll_data_t union is just opaque data to the kernel,
12775                  * so we transfer all 64 bits across and need not worry what
12776                  * actual data type it is.
12777                  */
12778                 ep.data.u64 = tswap64(target_ep->data.u64);
12779                 unlock_user_struct(target_ep, arg4, 0);
12780             }
12781             /*
12782              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12783              * non-NULL pointer even though the argument is ignored, so if
12784              * the guest supplied a pointer, pass the kernel a valid one too.
12785              */
12786             epp = &ep;
12787         }
12788         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12789     }
12790 #endif
12791 
12792 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12793 #if defined(TARGET_NR_epoll_wait)
12794     case TARGET_NR_epoll_wait:
12795 #endif
12796 #if defined(TARGET_NR_epoll_pwait)
12797     case TARGET_NR_epoll_pwait:
12798 #endif
12799     {
12800         struct target_epoll_event *target_ep;
12801         struct epoll_event *ep;
12802         int epfd = arg1;
12803         int maxevents = arg3;
12804         int timeout = arg4;
12805 
12806         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12807             return -TARGET_EINVAL;
12808         }
12809 
12810         target_ep = lock_user(VERIFY_WRITE, arg2,
12811                               maxevents * sizeof(struct target_epoll_event), 1);
12812         if (!target_ep) {
12813             return -TARGET_EFAULT;
12814         }
12815 
12816         ep = g_try_new(struct epoll_event, maxevents);
12817         if (!ep) {
12818             unlock_user(target_ep, arg2, 0);
12819             return -TARGET_ENOMEM;
12820         }
12821 
12822         switch (num) {
12823 #if defined(TARGET_NR_epoll_pwait)
12824         case TARGET_NR_epoll_pwait:
12825         {
12826             sigset_t *set = NULL;
12827 
12828             if (arg5) {
12829                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12830                 if (ret != 0) {
12831                     break;
12832                 }
12833             }
12834 
12835             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12836                                              set, SIGSET_T_SIZE));
12837 
12838             if (set) {
12839                 finish_sigsuspend_mask(ret);
12840             }
12841             break;
12842         }
12843 #endif
12844 #if defined(TARGET_NR_epoll_wait)
12845         case TARGET_NR_epoll_wait:
12846             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12847                                              NULL, 0));
12848             break;
12849 #endif
12850         default:
12851             ret = -TARGET_ENOSYS;
12852         }
12853         if (!is_error(ret)) {
12854             int i;
12855             for (i = 0; i < ret; i++) {
12856                 target_ep[i].events = tswap32(ep[i].events);
12857                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12858             }
12859             unlock_user(target_ep, arg2,
12860                         ret * sizeof(struct target_epoll_event));
12861         } else {
12862             unlock_user(target_ep, arg2, 0);
12863         }
12864         g_free(ep);
12865         return ret;
12866     }
12867 #endif
12868 #endif
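    /*
     * prlimit64: new limits for RLIMIT_AS/DATA/STACK are deliberately not
     * forwarded to the host (see the condition below), presumably because
     * shrinking those limits would constrain QEMU's own process rather than
     * just the guest.
     */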
12869 #ifdef TARGET_NR_prlimit64
12870     case TARGET_NR_prlimit64:
12871     {
12872         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12873         struct target_rlimit64 *target_rnew, *target_rold;
12874         struct host_rlimit64 rnew, rold, *rnewp = 0;
12875         int resource = target_to_host_resource(arg2);
12876 
12877         if (arg3 && (resource != RLIMIT_AS &&
12878                      resource != RLIMIT_DATA &&
12879                      resource != RLIMIT_STACK)) {
12880             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12881                 return -TARGET_EFAULT;
12882             }
12883             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12884             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12885             unlock_user_struct(target_rnew, arg3, 0);
12886             rnewp = &rnew;
12887         }
12888 
12889         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12890         if (!is_error(ret) && arg4) {
12891             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12892                 return -TARGET_EFAULT;
12893             }
12894             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12895             target_rold->rlim_max = tswap64(rold.rlim_max);
12896             unlock_user_struct(target_rold, arg4, 1);
12897         }
12898         return ret;
12899     }
12900 #endif
12901 #ifdef TARGET_NR_gethostname
12902     case TARGET_NR_gethostname:
12903     {
12904         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12905         if (name) {
12906             ret = get_errno(gethostname(name, arg2));
12907             unlock_user(name, arg1, arg2);
12908         } else {
12909             ret = -TARGET_EFAULT;
12910         }
12911         return ret;
12912     }
12913 #endif
12914 #ifdef TARGET_NR_atomic_cmpxchg_32
12915     case TARGET_NR_atomic_cmpxchg_32:
12916     {
12917         /* should use start_exclusive from main.c */
12918         abi_ulong mem_value;
12919         if (get_user_u32(mem_value, arg6)) {
12920             target_siginfo_t info;
12921             info.si_signo = SIGSEGV;
12922             info.si_errno = 0;
12923             info.si_code = TARGET_SEGV_MAPERR;
12924             info._sifields._sigfault._addr = arg6;
12925             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12926             return 0xdeadbeef;
12927         }
12928         if (mem_value == arg2) {
12929             put_user_u32(arg1, arg6);
12930         }
12931         return mem_value;
12932     }
12933 #endif
12934 #ifdef TARGET_NR_atomic_barrier
12935     case TARGET_NR_atomic_barrier:
12936         /* Like the kernel implementation and the QEMU ARM barrier,
12937            treat this as a no-op. */
12938         return 0;
12939 #endif
12940 
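    /*
     * POSIX timers are tracked in the g_posix_timers[] table: the value
     * handed back to the guest is TIMER_MAGIC | timer_index, get_timer_id()
     * validates and decodes it again, and free_host_timer_slot() releases
     * the slot when creation fails or the timer is deleted.
     */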
12941 #ifdef TARGET_NR_timer_create
12942     case TARGET_NR_timer_create:
12943     {
12944         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12945 
12946         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12947 
12948         int clkid = arg1;
12949         int timer_index = next_free_host_timer();
12950 
12951         if (timer_index < 0) {
12952             ret = -TARGET_EAGAIN;
12953         } else {
12954             timer_t *phtimer = g_posix_timers  + timer_index;
12955             timer_t *phtimer = g_posix_timers + timer_index;
12956             if (arg2) {
12957                 phost_sevp = &host_sevp;
12958                 ret = target_to_host_sigevent(phost_sevp, arg2);
12959                 if (ret != 0) {
12960                     free_host_timer_slot(timer_index);
12961                     return ret;
12962                 }
12963             }
12964 
12965             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12966             if (ret) {
12967                 free_host_timer_slot(timer_index);
12968             } else {
12969                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12970                     timer_delete(*phtimer);
12971                     free_host_timer_slot(timer_index);
12972                     return -TARGET_EFAULT;
12973                 }
12974             }
12975         }
12976         return ret;
12977     }
12978 #endif
12979 
12980 #ifdef TARGET_NR_timer_settime
12981     case TARGET_NR_timer_settime:
12982     {
12983         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12984          * struct itimerspec *old_value */
12985         target_timer_t timerid = get_timer_id(arg1);
12986 
12987         if (timerid < 0) {
12988             ret = timerid;
12989         } else if (arg3 == 0) {
12990             ret = -TARGET_EINVAL;
12991         } else {
12992             timer_t htimer = g_posix_timers[timerid];
12993             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12994 
12995             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12996                 return -TARGET_EFAULT;
12997             }
12998             ret = get_errno(
12999                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13000             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13001                 return -TARGET_EFAULT;
13002             }
13003         }
13004         return ret;
13005     }
13006 #endif
13007 
13008 #ifdef TARGET_NR_timer_settime64
13009     case TARGET_NR_timer_settime64:
13010     {
13011         target_timer_t timerid = get_timer_id(arg1);
13012 
13013         if (timerid < 0) {
13014             ret = timerid;
13015         } else if (arg3 == 0) {
13016             ret = -TARGET_EINVAL;
13017         } else {
13018             timer_t htimer = g_posix_timers[timerid];
13019             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13020 
13021             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13022                 return -TARGET_EFAULT;
13023             }
13024             ret = get_errno(
13025                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13026             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13027                 return -TARGET_EFAULT;
13028             }
13029         }
13030         return ret;
13031     }
13032 #endif
13033 
13034 #ifdef TARGET_NR_timer_gettime
13035     case TARGET_NR_timer_gettime:
13036     {
13037         /* args: timer_t timerid, struct itimerspec *curr_value */
13038         target_timer_t timerid = get_timer_id(arg1);
13039 
13040         if (timerid < 0) {
13041             ret = timerid;
13042         } else if (!arg2) {
13043             ret = -TARGET_EFAULT;
13044         } else {
13045             timer_t htimer = g_posix_timers[timerid];
13046             struct itimerspec hspec;
13047             ret = get_errno(timer_gettime(htimer, &hspec));
13048 
13049             if (host_to_target_itimerspec(arg2, &hspec)) {
13050                 ret = -TARGET_EFAULT;
13051             }
13052         }
13053         return ret;
13054     }
13055 #endif
13056 
13057 #ifdef TARGET_NR_timer_gettime64
13058     case TARGET_NR_timer_gettime64:
13059     {
13060         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13061         target_timer_t timerid = get_timer_id(arg1);
13062 
13063         if (timerid < 0) {
13064             ret = timerid;
13065         } else if (!arg2) {
13066             ret = -TARGET_EFAULT;
13067         } else {
13068             timer_t htimer = g_posix_timers[timerid];
13069             struct itimerspec hspec;
13070             ret = get_errno(timer_gettime(htimer, &hspec));
13071 
13072             if (host_to_target_itimerspec64(arg2, &hspec)) {
13073                 ret = -TARGET_EFAULT;
13074             }
13075         }
13076         return ret;
13077     }
13078 #endif
13079 
13080 #ifdef TARGET_NR_timer_getoverrun
13081     case TARGET_NR_timer_getoverrun:
13082     {
13083         /* args: timer_t timerid */
13084         target_timer_t timerid = get_timer_id(arg1);
13085 
13086         if (timerid < 0) {
13087             ret = timerid;
13088         } else {
13089             timer_t htimer = g_posix_timers[timerid];
13090             ret = get_errno(timer_getoverrun(htimer));
13091         }
13092         return ret;
13093     }
13094 #endif
13095 
13096 #ifdef TARGET_NR_timer_delete
13097     case TARGET_NR_timer_delete:
13098     {
13099         /* args: timer_t timerid */
13100         target_timer_t timerid = get_timer_id(arg1);
13101 
13102         if (timerid < 0) {
13103             ret = timerid;
13104         } else {
13105             timer_t htimer = g_posix_timers[timerid];
13106             ret = get_errno(timer_delete(htimer));
13107             free_host_timer_slot(timerid);
13108         }
13109         return ret;
13110     }
13111 #endif
13112 
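    /*
     * timerfd: the itimerspec arguments are converted with the
     * target_to_host/host_to_target itimerspec helpers; the *_time64
     * variants only differ in using the 64-bit guest layout.
     */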
13113 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13114     case TARGET_NR_timerfd_create:
13115         return get_errno(timerfd_create(arg1,
13116                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13117 #endif
13118 
13119 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13120     case TARGET_NR_timerfd_gettime:
13121         {
13122             struct itimerspec its_curr;
13123 
13124             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13125 
13126             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13127                 return -TARGET_EFAULT;
13128             }
13129         }
13130         return ret;
13131 #endif
13132 
13133 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13134     case TARGET_NR_timerfd_gettime64:
13135         {
13136             struct itimerspec its_curr;
13137 
13138             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13139 
13140             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13141                 return -TARGET_EFAULT;
13142             }
13143         }
13144         return ret;
13145 #endif
13146 
13147 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13148     case TARGET_NR_timerfd_settime:
13149         {
13150             struct itimerspec its_new, its_old, *p_new;
13151 
13152             if (arg3) {
13153                 if (target_to_host_itimerspec(&its_new, arg3)) {
13154                     return -TARGET_EFAULT;
13155                 }
13156                 p_new = &its_new;
13157             } else {
13158                 p_new = NULL;
13159             }
13160 
13161             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13162 
13163             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13164                 return -TARGET_EFAULT;
13165             }
13166         }
13167         return ret;
13168 #endif
13169 
13170 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13171     case TARGET_NR_timerfd_settime64:
13172         {
13173             struct itimerspec its_new, its_old, *p_new;
13174 
13175             if (arg3) {
13176                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13177                     return -TARGET_EFAULT;
13178                 }
13179                 p_new = &its_new;
13180             } else {
13181                 p_new = NULL;
13182             }
13183 
13184             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13185 
13186             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13187                 return -TARGET_EFAULT;
13188             }
13189         }
13190         return ret;
13191 #endif
13192 
13193 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13194     case TARGET_NR_ioprio_get:
13195         return get_errno(ioprio_get(arg1, arg2));
13196 #endif
13197 
13198 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13199     case TARGET_NR_ioprio_set:
13200         return get_errno(ioprio_set(arg1, arg2, arg3));
13201 #endif
13202 
13203 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13204     case TARGET_NR_setns:
13205         return get_errno(setns(arg1, arg2));
13206 #endif
13207 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13208     case TARGET_NR_unshare:
13209         return get_errno(unshare(arg1));
13210 #endif
13211 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13212     case TARGET_NR_kcmp:
13213         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13214 #endif
13215 #ifdef TARGET_NR_swapcontext
13216     case TARGET_NR_swapcontext:
13217         /* PowerPC specific.  */
13218         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13219 #endif
13220 #ifdef TARGET_NR_memfd_create
13221     case TARGET_NR_memfd_create:
13222         p = lock_user_string(arg1);
13223         if (!p) {
13224             return -TARGET_EFAULT;
13225         }
13226         ret = get_errno(memfd_create(p, arg2));
13227         fd_trans_unregister(ret);
13228         unlock_user(p, arg1, 0);
13229         return ret;
13230 #endif
13231 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13232     case TARGET_NR_membarrier:
13233         return get_errno(membarrier(arg1, arg2));
13234 #endif
13235 
13236 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13237     case TARGET_NR_copy_file_range:
13238         {
13239             loff_t inoff, outoff;
13240             loff_t *pinoff = NULL, *poutoff = NULL;
13241 
13242             if (arg2) {
13243                 if (get_user_u64(inoff, arg2)) {
13244                     return -TARGET_EFAULT;
13245                 }
13246                 pinoff = &inoff;
13247             }
13248             if (arg4) {
13249                 if (get_user_u64(outoff, arg4)) {
13250                     return -TARGET_EFAULT;
13251                 }
13252                 poutoff = &outoff;
13253             }
13254             /* Do not sign-extend the count parameter. */
13255             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13256                                                  (abi_ulong)arg5, arg6));
13257             if (!is_error(ret) && ret > 0) {
13258                 if (arg2) {
13259                     if (put_user_u64(inoff, arg2)) {
13260                         return -TARGET_EFAULT;
13261                     }
13262                 }
13263                 if (arg4) {
13264                     if (put_user_u64(outoff, arg4)) {
13265                         return -TARGET_EFAULT;
13266                     }
13267                 }
13268             }
13269         }
13270         return ret;
13271 #endif
13272 
13273 #if defined(TARGET_NR_pivot_root)
13274     case TARGET_NR_pivot_root:
13275         {
13276             void *p2;
13277             p = lock_user_string(arg1); /* new_root */
13278             p2 = lock_user_string(arg2); /* put_old */
13279             if (!p || !p2) {
13280                 ret = -TARGET_EFAULT;
13281             } else {
13282                 ret = get_errno(pivot_root(p, p2));
13283             }
13284             unlock_user(p2, arg2, 0);
13285             unlock_user(p, arg1, 0);
13286         }
13287         return ret;
13288 #endif
13289 
13290     default:
13291         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13292         return -TARGET_ENOSYS;
13293     }
13294     return ret;
13295 }
13296 
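/*
 * do_syscall() is the entry point used by the per-target cpu loops: it
 * records the syscall start and return for instrumentation, prints
 * -strace style logging when enabled, and dispatches to do_syscall1()
 * above for the actual emulation.
 */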
13297 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13298                     abi_long arg2, abi_long arg3, abi_long arg4,
13299                     abi_long arg5, abi_long arg6, abi_long arg7,
13300                     abi_long arg8)
13301 {
13302     CPUState *cpu = env_cpu(cpu_env);
13303     abi_long ret;
13304 
13305 #ifdef DEBUG_ERESTARTSYS
13306     /* Debug-only code for exercising the syscall-restart code paths
13307      * in the per-architecture cpu main loops: restart every syscall
13308      * the guest makes once before letting it through.
13309      */
13310     {
13311         static bool flag;
13312         flag = !flag;
13313         if (flag) {
13314             return -QEMU_ERESTARTSYS;
13315         }
13316     }
13317 #endif
13318 
13319     record_syscall_start(cpu, num, arg1,
13320                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13321 
13322     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13323         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13324     }
13325 
13326     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13327                       arg5, arg6, arg7, arg8);
13328 
13329     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13330         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13331                           arg3, arg4, arg5, arg6);
13332     }
13333 
13334     record_syscall_return(cpu, num, ret);
13335     return ret;
13336 }
13337