/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include "qemu/plugin.h"
#include "tcg/startup.h"
#include "target_mman.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "user/page-protection.h"
#include "user/safe-syscall.h"
#include "user/signal.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "user/cpu_loop.h"

#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

#ifndef CLONE_PIDFD
# define CLONE_PIDFD 0x00001000
#endif

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

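/*
 * Illustrative sketch (simplified, not the actual implementation): roughly
 * how a guest clone() flags word gets classified with the masks above.  The
 * real decision logic lives in do_fork() further down in this file; 'flags'
 * here is a hypothetical guest value.
 *
 *     if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *         // looks like pthread_create(); reject unknown/unsupported bits
 *         if (flags & CLONE_INVALID_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *         // create a new QEMU thread sharing the address space
 *     } else if ((flags & CLONE_THREAD_FLAGS) == 0) {
 *         // looks like fork(); only optional/ignored bits may remain
 *         if (flags & CLONE_INVALID_FORK_FLAGS) {
 *             return -TARGET_EINVAL;
 *         }
 *         // fork the whole emulated process
 *     }
 */
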
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
                  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}

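/*
 * Each _syscallN() use below expands to a small static wrapper around the
 * host's raw syscall() entry point, bypassing the glibc wrappers.  For
 * example, the later pair
 *
 *     #define __NR_sys_gettid __NR_gettid
 *     _syscall0(int, sys_gettid)
 *
 * expands roughly to:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_gettid);
 *     }
 */
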
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
          loff_t *, res, unsigned int, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC     (1U << 2)
#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
                             unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc < 2.41 */
#ifndef SCHED_ATTR_SIZE_VER0
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#endif
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
};

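/*
 * Each entry above is read as: if (target flags & <target mask>) equals
 * <target bits>, OR <host bits> into the host flags (and vice versa for the
 * host-to-target direction).  A minimal sketch of how this table is consumed
 * elsewhere in this file, via the target_to_host_bitmask() helper:
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     // e.g. a guest TARGET_O_NONBLOCK | TARGET_O_CREAT request becomes the
 *     // host's O_NONBLOCK | O_CREAT, whatever their numeric values are.
 */
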
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif

static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

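/*
 * errnos.c.inc is an X-macro list of E(...) entries, so both switches above
 * are generated from the same table.  For example, assuming the list contains
 * an entry E(ENOSYS), it expands to
 *
 *     case ENOSYS: return TARGET_ENOSYS;   // in host_to_target_errno()
 *     case TARGET_ENOSYS: return ENOSYS;   // in target_to_host_errno()
 *
 * which keeps the two directions of the mapping from drifting apart.
 */
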
abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}

static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;
    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}

/*
 * Copies a target struct to a host struct, in a way that guarantees
 * backwards-compatibility for struct syscall arguments.
 *
 * Similar to the kernel's uaccess.h:copy_struct_from_user()
 */
int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
{
    size_t size = MIN(ksize, usize);
    size_t rest = MAX(ksize, usize) - size;

    /* Deal with trailing bytes. */
    if (usize < ksize) {
        memset(dst + size, 0, rest);
    } else if (usize > ksize) {
        int ret = check_zeroed_user(src, ksize, usize);
        if (ret <= 0) {
            return ret ?: -TARGET_E2BIG;
        }
    }
    /* Copy the interoperable parts of the struct. */
    if (copy_from_user(dst, src, size)) {
        return -TARGET_EFAULT;
    }
    return 0;
}

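/*
 * In other words, for a guest struct of usize bytes copied into a host
 * struct of ksize bytes, the three cases handled above are (sketch,
 * mirroring the kernel's copy_struct_from_user() semantics):
 *
 *     usize == ksize:  plain copy
 *     usize <  ksize:  copy usize bytes, zero-fill the remaining host bytes
 *                      (an older guest calling a newer interface)
 *     usize >  ksize:  accept only if every extra guest byte is zero,
 *                      otherwise fail with -TARGET_E2BIG
 *                      (a newer guest passing features we do not know about)
 */
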
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

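/*
 * The safe_##name wrappers behave like the plain host syscall (result, or -1
 * with errno set) but cooperate with the guest signal handling code so that a
 * blocking call can be interrupted and restarted.  The calling pattern used
 * throughout the rest of this file is therefore, sketched:
 *
 *     ret = get_errno(safe_read(fd, host_buf, count));
 *     if (is_error(ret)) {
 *         // ret is already a -TARGET_Exxx value, possibly -QEMU_ERESTARTSYS
 *         // asking the main loop to restart the guest syscall
 *     }
 */
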
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)

safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
              const struct open_how_ver0 *, how, size_t, size)

#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
#if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
safe_syscall4(int, fchmodat2, int, dfd, const char *, filename,
              unsigned short, mode, unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Since we always build with LFS enabled,
 * we should be using the 64-bit structures automatically.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk, initial_target_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_brk = TARGET_PAGE_ALIGN(new_brk);
    initial_target_brk = target_brk;
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* do not allow the heap to shrink below the initial brk value */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

        target_brk = brk_val;
        return target_brk;
    }

    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif

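/*
 * Layout sketch for the fdset conversion above: guest fd k lives in
 * abi_ulong word i = k / TARGET_ABI_BITS at bit j = k % TARGET_ABI_BITS.
 * For example, with TARGET_ABI_BITS == 32, guest fd 37 is bit 5 of word 1,
 * so a guest fd_set covering n descriptors occupies DIV_ROUND_UP(n, 32)
 * words, which is exactly the size locked and copied by these helpers.
 */
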
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

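/*
 * Worked example for the conversion above (hypothetical values): on a
 * non-Alpha host (HOST_HZ == 100) emulating a guest whose TARGET_HZ is 1024,
 * 250 host ticks are reported to the guest as 250 * 1024 / 100 = 2560 ticks,
 * i.e. the same 2.5 seconds of CPU time expressed in the guest's clock_t
 * resolution.
 */
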
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
1172     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1173     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1174     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1175     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1176     defined(TARGET_NR_timer_settime) || \
1177     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1178 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1179                                                abi_ulong target_addr)
1180 {
1181     struct target_timespec *target_ts;
1182 
1183     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1184         return -TARGET_EFAULT;
1185     }
1186     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1187     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1188     unlock_user_struct(target_ts, target_addr, 0);
1189     return 0;
1190 }
1191 #endif
1192 
1193 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1194     defined(TARGET_NR_timer_settime64) || \
1195     defined(TARGET_NR_mq_timedsend_time64) || \
1196     defined(TARGET_NR_mq_timedreceive_time64) || \
1197     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1198     defined(TARGET_NR_clock_nanosleep_time64) || \
1199     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1200     defined(TARGET_NR_utimensat) || \
1201     defined(TARGET_NR_utimensat_time64) || \
1202     defined(TARGET_NR_semtimedop_time64) || \
1203     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1204 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1205                                                  abi_ulong target_addr)
1206 {
1207     struct target__kernel_timespec *target_ts;
1208 
1209     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1210         return -TARGET_EFAULT;
1211     }
1212     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1213     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1214     /* in 32bit mode, this drops the padding */
1215     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1216     unlock_user_struct(target_ts, target_addr, 0);
1217     return 0;
1218 }
1219 #endif
1220 
1221 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1222                                                struct timespec *host_ts)
1223 {
1224     struct target_timespec *target_ts;
1225 
1226     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1227         return -TARGET_EFAULT;
1228     }
1229     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1230     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1231     unlock_user_struct(target_ts, target_addr, 1);
1232     return 0;
1233 }
1234 
1235 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1236                                                  struct timespec *host_ts)
1237 {
1238     struct target__kernel_timespec *target_ts;
1239 
1240     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1241         return -TARGET_EFAULT;
1242     }
1243     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1244     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1245     unlock_user_struct(target_ts, target_addr, 1);
1246     return 0;
1247 }
1248 
1249 #if defined(TARGET_NR_gettimeofday)
1250 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1251                                              struct timezone *tz)
1252 {
1253     struct target_timezone *target_tz;
1254 
1255     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1256         return -TARGET_EFAULT;
1257     }
1258 
1259     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1260     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1261 
1262     unlock_user_struct(target_tz, target_tz_addr, 1);
1263 
1264     return 0;
1265 }
1266 #endif
1267 
1268 #if defined(TARGET_NR_settimeofday)
1269 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1270                                                abi_ulong target_tz_addr)
1271 {
1272     struct target_timezone *target_tz;
1273 
1274     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1275         return -TARGET_EFAULT;
1276     }
1277 
1278     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1279     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1280 
1281     unlock_user_struct(target_tz, target_tz_addr, 0);
1282 
1283     return 0;
1284 }
1285 #endif
1286 
1287 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1288 #include <mqueue.h>
1289 
1290 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1291                                               abi_ulong target_mq_attr_addr)
1292 {
1293     struct target_mq_attr *target_mq_attr;
1294 
1295     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1296                           target_mq_attr_addr, 1))
1297         return -TARGET_EFAULT;
1298 
1299     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1300     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1301     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1302     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1303 
1304     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1305 
1306     return 0;
1307 }
1308 
1309 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1310                                             const struct mq_attr *attr)
1311 {
1312     struct target_mq_attr *target_mq_attr;
1313 
1314     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1315                           target_mq_attr_addr, 0))
1316         return -TARGET_EFAULT;
1317 
1318     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1319     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1320     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1321     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1322 
1323     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1324 
1325     return 0;
1326 }
1327 #endif
1328 
1329 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1330 /* do_select() must return target values and target errnos. */
1331 static abi_long do_select(int n,
1332                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1333                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1334 {
1335     fd_set rfds, wfds, efds;
1336     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1337     struct timeval tv;
1338     struct timespec ts, *ts_ptr;
1339     abi_long ret;
1340 
1341     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1342     if (ret) {
1343         return ret;
1344     }
1345     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1346     if (ret) {
1347         return ret;
1348     }
1349     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1350     if (ret) {
1351         return ret;
1352     }
1353 
1354     if (target_tv_addr) {
1355         if (copy_from_user_timeval(&tv, target_tv_addr))
1356             return -TARGET_EFAULT;
1357         ts.tv_sec = tv.tv_sec;
1358         ts.tv_nsec = tv.tv_usec * 1000;
1359         ts_ptr = &ts;
1360     } else {
1361         ts_ptr = NULL;
1362     }
1363 
1364     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1365                                   ts_ptr, NULL));
1366 
1367     if (!is_error(ret)) {
1368         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1369             return -TARGET_EFAULT;
1370         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1371             return -TARGET_EFAULT;
1372         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1373             return -TARGET_EFAULT;
1374 
1375         if (target_tv_addr) {
1376             tv.tv_sec = ts.tv_sec;
1377             tv.tv_usec = ts.tv_nsec / 1000;
1378             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1379                 return -TARGET_EFAULT;
1380             }
1381         }
1382     }
1383 
1384     return ret;
1385 }
1386 
1387 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1388 static abi_long do_old_select(abi_ulong arg1)
1389 {
1390     struct target_sel_arg_struct *sel;
1391     abi_ulong inp, outp, exp, tvp;
1392     long nsel;
1393 
1394     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1395         return -TARGET_EFAULT;
1396     }
1397 
1398     nsel = tswapal(sel->n);
1399     inp = tswapal(sel->inp);
1400     outp = tswapal(sel->outp);
1401     exp = tswapal(sel->exp);
1402     tvp = tswapal(sel->tvp);
1403 
1404     unlock_user_struct(sel, arg1, 0);
1405 
1406     return do_select(nsel, inp, outp, exp, tvp);
1407 }
1408 #endif
1409 #endif
1410 
1411 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1412 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1413                             abi_long arg4, abi_long arg5, abi_long arg6,
1414                             bool time64)
1415 {
1416     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1417     fd_set rfds, wfds, efds;
1418     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1419     struct timespec ts, *ts_ptr;
1420     abi_long ret;
1421 
1422     /*
1423      * The 6th arg is actually two args smashed together,
1424      * so we cannot use the C library.
1425      */
1426     struct {
1427         sigset_t *set;
1428         size_t size;
1429     } sig, *sig_ptr;
1430 
1431     abi_ulong arg_sigset, arg_sigsize, *arg7;
1432 
1433     n = arg1;
1434     rfd_addr = arg2;
1435     wfd_addr = arg3;
1436     efd_addr = arg4;
1437     ts_addr = arg5;
1438 
1439     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1440     if (ret) {
1441         return ret;
1442     }
1443     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1444     if (ret) {
1445         return ret;
1446     }
1447     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1448     if (ret) {
1449         return ret;
1450     }
1451 
1452     /*
1453      * This takes a timespec, and not a timeval, so we cannot
1454      * use the do_select() helper ...
1455      */
1456     if (ts_addr) {
1457         if (time64) {
1458             if (target_to_host_timespec64(&ts, ts_addr)) {
1459                 return -TARGET_EFAULT;
1460             }
1461         } else {
1462             if (target_to_host_timespec(&ts, ts_addr)) {
1463                 return -TARGET_EFAULT;
1464             }
1465         }
1466             ts_ptr = &ts;
1467     } else {
1468         ts_ptr = NULL;
1469     }
1470 
1471     /* Extract the two packed args for the sigset */
1472     sig_ptr = NULL;
1473     if (arg6) {
1474         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1475         if (!arg7) {
1476             return -TARGET_EFAULT;
1477         }
1478         arg_sigset = tswapal(arg7[0]);
1479         arg_sigsize = tswapal(arg7[1]);
1480         unlock_user(arg7, arg6, 0);
1481 
1482         if (arg_sigset) {
1483             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1484             if (ret != 0) {
1485                 return ret;
1486             }
1487             sig_ptr = &sig;
1488             sig.size = SIGSET_T_SIZE;
1489         }
1490     }
1491 
1492     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1493                                   ts_ptr, sig_ptr));
1494 
1495     if (sig_ptr) {
1496         finish_sigsuspend_mask(ret);
1497     }
1498 
1499     if (!is_error(ret)) {
1500         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1501             return -TARGET_EFAULT;
1502         }
1503         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1504             return -TARGET_EFAULT;
1505         }
1506         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1507             return -TARGET_EFAULT;
1508         }
1509         if (time64) {
1510             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1511                 return -TARGET_EFAULT;
1512             }
1513         } else {
1514             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1515                 return -TARGET_EFAULT;
1516             }
1517         }
1518     }
1519     return ret;
1520 }
1521 #endif
1522 
1523 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1524     defined(TARGET_NR_ppoll_time64)
1525 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1526                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1527 {
1528     struct target_pollfd *target_pfd;
1529     unsigned int nfds = arg2;
1530     struct pollfd *pfd;
1531     unsigned int i;
1532     abi_long ret;
1533 
1534     pfd = NULL;
1535     target_pfd = NULL;
1536     if (nfds) {
1537         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1538             return -TARGET_EINVAL;
1539         }
1540         target_pfd = lock_user(VERIFY_WRITE, arg1,
1541                                sizeof(struct target_pollfd) * nfds, 1);
1542         if (!target_pfd) {
1543             return -TARGET_EFAULT;
1544         }
1545 
1546         pfd = alloca(sizeof(struct pollfd) * nfds);
1547         for (i = 0; i < nfds; i++) {
1548             pfd[i].fd = tswap32(target_pfd[i].fd);
1549             pfd[i].events = tswap16(target_pfd[i].events);
1550         }
1551     }
1552     if (ppoll) {
1553         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1554         sigset_t *set = NULL;
1555 
1556         if (arg3) {
1557             if (time64) {
1558                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1559                     unlock_user(target_pfd, arg1, 0);
1560                     return -TARGET_EFAULT;
1561                 }
1562             } else {
1563                 if (target_to_host_timespec(timeout_ts, arg3)) {
1564                     unlock_user(target_pfd, arg1, 0);
1565                     return -TARGET_EFAULT;
1566                 }
1567             }
1568         } else {
1569             timeout_ts = NULL;
1570         }
1571 
1572         if (arg4) {
1573             ret = process_sigsuspend_mask(&set, arg4, arg5);
1574             if (ret != 0) {
1575                 unlock_user(target_pfd, arg1, 0);
1576                 return ret;
1577             }
1578         }
1579 
1580         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1581                                    set, SIGSET_T_SIZE));
1582 
1583         if (set) {
1584             finish_sigsuspend_mask(ret);
1585         }
1586         if (!is_error(ret) && arg3) {
1587             if (time64) {
1588                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1589                     return -TARGET_EFAULT;
1590                 }
1591             } else {
1592                 if (host_to_target_timespec(arg3, timeout_ts)) {
1593                     return -TARGET_EFAULT;
1594                 }
1595             }
1596         }
1597     } else {
1598         struct timespec ts, *pts;
1599 
1600         if (arg3 >= 0) {
1601             /* Convert ms to secs, ns */
1602             ts.tv_sec = arg3 / 1000;
1603             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1604             pts = &ts;
1605         } else {
1606             /* -ve poll() timeout means "infinite" */
1607             pts = NULL;
1608         }
1609         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1610     }
1611 
1612     if (!is_error(ret)) {
1613         for (i = 0; i < nfds; i++) {
1614             target_pfd[i].revents = tswap16(pfd[i].revents);
1615         }
1616     }
1617     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1618     return ret;
1619 }
1620 #endif
1621 
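/*
 * Emulate pipe() and pipe2().  The two host descriptors are either
 * returned through the architecture-specific convention of the original
 * pipe syscall (second fd handed back in a register) or written to the
 * guest 'pipedes' array.
 */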
1622 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1623                         int flags, int is_pipe2)
1624 {
1625     int host_pipe[2];
1626     abi_long ret;
1627     ret = pipe2(host_pipe, flags);
1628 
1629     if (is_error(ret))
1630         return get_errno(ret);
1631 
1632     /* Several targets have special calling conventions for the original
1633        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1634     if (!is_pipe2) {
1635 #if defined(TARGET_ALPHA)
1636         cpu_env->ir[IR_A4] = host_pipe[1];
1637         return host_pipe[0];
1638 #elif defined(TARGET_MIPS)
1639         cpu_env->active_tc.gpr[3] = host_pipe[1];
1640         return host_pipe[0];
1641 #elif defined(TARGET_SH4)
1642         cpu_env->gregs[1] = host_pipe[1];
1643         return host_pipe[0];
1644 #elif defined(TARGET_SPARC)
1645         cpu_env->regwptr[1] = host_pipe[1];
1646         return host_pipe[0];
1647 #endif
1648     }
1649 
1650     if (put_user_s32(host_pipe[0], pipedes)
1651         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1652         return -TARGET_EFAULT;
1653     return get_errno(ret);
1654 }
1655 
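/*
 * Copy a sockaddr from guest memory into the host buffer 'addr',
 * converting the address family and byteswapping the family-specific
 * fields that need it (AF_NETLINK pid/groups, AF_PACKET ifindex/hatype,
 * AF_INET6 scope id).  A per-fd translator registered via fd_trans takes
 * precedence over the generic conversion.
 */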
1656 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1657                                                abi_ulong target_addr,
1658                                                socklen_t len)
1659 {
1660     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1661     sa_family_t sa_family;
1662     struct target_sockaddr *target_saddr;
1663 
1664     if (fd_trans_target_to_host_addr(fd)) {
1665         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1666     }
1667 
1668     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1669     if (!target_saddr)
1670         return -TARGET_EFAULT;
1671 
1672     sa_family = tswap16(target_saddr->sa_family);
1673 
1674     /* Oops. The caller might send an incomplete sun_path; sun_path
1675      * must be terminated by \0 (see the manual page), but
1676      * unfortunately it is quite common to specify sockaddr_un
1677      * length as "strlen(x->sun_path)" while it should be
1678      * "strlen(...) + 1". We'll fix that here if needed.
1679      * The Linux kernel has a similar feature.
1680      */
1681 
1682     if (sa_family == AF_UNIX) {
1683         if (len < unix_maxlen && len > 0) {
1684             char *cp = (char *)target_saddr;
1685 
1686             if (cp[len - 1] && !cp[len])
1687                 len++;
1688         }
1689         if (len > unix_maxlen)
1690             len = unix_maxlen;
1691     }
1692 
1693     memcpy(addr, target_saddr, len);
1694     addr->sa_family = sa_family;
1695     if (sa_family == AF_NETLINK) {
1696         struct sockaddr_nl *nladdr;
1697 
1698         nladdr = (struct sockaddr_nl *)addr;
1699         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1700         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1701     } else if (sa_family == AF_PACKET) {
1702         struct target_sockaddr_ll *lladdr;
1703 
1704         lladdr = (struct target_sockaddr_ll *)addr;
1705         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1706         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1707     } else if (sa_family == AF_INET6) {
1708         struct sockaddr_in6 *in6addr;
1709 
1710         in6addr = (struct sockaddr_in6 *)addr;
1711         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1712     }
1713     unlock_user(target_saddr, target_addr, 0);
1714 
1715     return 0;
1716 }
1717 
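/*
 * Copy a host sockaddr back out to guest memory at 'target_addr',
 * byteswapping the family and, when the buffer is large enough, the
 * AF_NETLINK, AF_PACKET and AF_INET6 fields that need conversion.
 */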
1718 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1719                                                struct sockaddr *addr,
1720                                                socklen_t len)
1721 {
1722     struct target_sockaddr *target_saddr;
1723 
1724     if (len == 0) {
1725         return 0;
1726     }
1727     assert(addr);
1728 
1729     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1730     if (!target_saddr)
1731         return -TARGET_EFAULT;
1732     memcpy(target_saddr, addr, len);
1733     if (len >= offsetof(struct target_sockaddr, sa_family) +
1734         sizeof(target_saddr->sa_family)) {
1735         target_saddr->sa_family = tswap16(addr->sa_family);
1736     }
1737     if (addr->sa_family == AF_NETLINK &&
1738         len >= sizeof(struct target_sockaddr_nl)) {
1739         struct target_sockaddr_nl *target_nl =
1740                (struct target_sockaddr_nl *)target_saddr;
1741         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1742         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1743     } else if (addr->sa_family == AF_PACKET) {
1744         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1745         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1746         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1747     } else if (addr->sa_family == AF_INET6 &&
1748                len >= sizeof(struct target_sockaddr_in6)) {
1749         struct target_sockaddr_in6 *target_in6 =
1750                (struct target_sockaddr_in6 *)target_saddr;
1751         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1752     }
1753     unlock_user(target_saddr, target_addr, len);
1754 
1755     return 0;
1756 }
1757 
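/*
 * Convert the ancillary (control) data of a guest msghdr into host form:
 * walk the target cmsg chain, translate level/type, and convert the
 * payloads we understand (SCM_RIGHTS fd arrays, SCM_CREDENTIALS, SOL_ALG).
 * Unknown payloads are copied verbatim with a LOG_UNIMP warning.
 */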
1758 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1759                                            struct target_msghdr *target_msgh)
1760 {
1761     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1762     abi_long msg_controllen;
1763     abi_ulong target_cmsg_addr;
1764     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1765     socklen_t space = 0;
1766 
1767     msg_controllen = tswapal(target_msgh->msg_controllen);
1768     if (msg_controllen < sizeof (struct target_cmsghdr))
1769         goto the_end;
1770     target_cmsg_addr = tswapal(target_msgh->msg_control);
1771     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1772     target_cmsg_start = target_cmsg;
1773     if (!target_cmsg)
1774         return -TARGET_EFAULT;
1775 
1776     while (cmsg && target_cmsg) {
1777         void *data = CMSG_DATA(cmsg);
1778         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1779 
1780         int len = tswapal(target_cmsg->cmsg_len)
1781             - sizeof(struct target_cmsghdr);
1782 
1783         space += CMSG_SPACE(len);
1784         if (space > msgh->msg_controllen) {
1785             space -= CMSG_SPACE(len);
1786             /* This is a QEMU bug, since we allocated the payload
1787              * area ourselves (unlike overflow in host-to-target
1788              * conversion, which is just the guest giving us a buffer
1789              * that's too small). It can't happen for the payload types
1790              * we currently support; if it becomes an issue in future
1791              * we would need to improve our allocation strategy to
1792              * something more intelligent than "twice the size of the
1793              * target buffer we're reading from".
1794              */
1795             qemu_log_mask(LOG_UNIMP,
1796                           ("Unsupported ancillary data %d/%d: "
1797                            "unhandled msg size\n"),
1798                           tswap32(target_cmsg->cmsg_level),
1799                           tswap32(target_cmsg->cmsg_type));
1800             break;
1801         }
1802 
1803         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1804             cmsg->cmsg_level = SOL_SOCKET;
1805         } else {
1806             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1807         }
1808         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1809         cmsg->cmsg_len = CMSG_LEN(len);
1810 
1811         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1812             int *fd = (int *)data;
1813             int *target_fd = (int *)target_data;
1814             int i, numfds = len / sizeof(int);
1815 
1816             for (i = 0; i < numfds; i++) {
1817                 __get_user(fd[i], target_fd + i);
1818             }
1819         } else if (cmsg->cmsg_level == SOL_SOCKET
1820                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1821             struct ucred *cred = (struct ucred *)data;
1822             struct target_ucred *target_cred =
1823                 (struct target_ucred *)target_data;
1824 
1825             __get_user(cred->pid, &target_cred->pid);
1826             __get_user(cred->uid, &target_cred->uid);
1827             __get_user(cred->gid, &target_cred->gid);
1828         } else if (cmsg->cmsg_level == SOL_ALG) {
1829             uint32_t *dst = (uint32_t *)data;
1830 
1831             memcpy(dst, target_data, len);
1832             /* fix endianness of first 32-bit word */
1833             if (len >= sizeof(uint32_t)) {
1834                 *dst = tswap32(*dst);
1835             }
1836         } else {
1837             qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
1838                           cmsg->cmsg_level, cmsg->cmsg_type);
1839             memcpy(data, target_data, len);
1840         }
1841 
1842         cmsg = CMSG_NXTHDR(msgh, cmsg);
1843         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1844                                          target_cmsg_start);
1845     }
1846     unlock_user(target_cmsg, target_cmsg_addr, 0);
1847  the_end:
1848     msgh->msg_controllen = space;
1849     return 0;
1850 }
1851 
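/*
 * Convert host ancillary data back into the guest msghdr.  If the guest
 * control buffer is too small, the data is truncated and MSG_CTRUNC is
 * reported; payloads whose target layout differs in size (e.g.
 * SO_TIMESTAMP's struct timeval) are resized accordingly.
 */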
1852 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1853                                            struct msghdr *msgh)
1854 {
1855     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1856     abi_long msg_controllen;
1857     abi_ulong target_cmsg_addr;
1858     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1859     socklen_t space = 0;
1860 
1861     msg_controllen = tswapal(target_msgh->msg_controllen);
1862     if (msg_controllen < sizeof (struct target_cmsghdr))
1863         goto the_end;
1864     target_cmsg_addr = tswapal(target_msgh->msg_control);
1865     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1866     target_cmsg_start = target_cmsg;
1867     if (!target_cmsg)
1868         return -TARGET_EFAULT;
1869 
1870     while (cmsg && target_cmsg) {
1871         void *data = CMSG_DATA(cmsg);
1872         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1873 
1874         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1875         int tgt_len, tgt_space;
1876 
1877         /* We never copy a half-header but may copy half-data;
1878          * this is Linux's behaviour in put_cmsg(). Note that
1879          * truncation here is a guest problem (which we report
1880          * to the guest via the CTRUNC bit), unlike truncation
1881          * in target_to_host_cmsg, which is a QEMU bug.
1882          */
1883         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1884             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1885             break;
1886         }
1887 
1888         if (cmsg->cmsg_level == SOL_SOCKET) {
1889             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1890         } else {
1891             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1892         }
1893         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1894 
1895         /* Payload types which need a different size of payload on
1896          * the target must adjust tgt_len here.
1897          */
1898         tgt_len = len;
1899         switch (cmsg->cmsg_level) {
1900         case SOL_SOCKET:
1901             switch (cmsg->cmsg_type) {
1902             case SO_TIMESTAMP:
1903                 tgt_len = sizeof(struct target_timeval);
1904                 break;
1905             default:
1906                 break;
1907             }
1908             break;
1909         default:
1910             break;
1911         }
1912 
1913         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1914             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1915             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1916         }
1917 
1918         /* We must now copy-and-convert len bytes of payload
1919          * into tgt_len bytes of destination space. Bear in mind
1920          * that in both source and destination we may be dealing
1921          * with a truncated value!
1922          */
1923         switch (cmsg->cmsg_level) {
1924         case SOL_SOCKET:
1925             switch (cmsg->cmsg_type) {
1926             case SCM_RIGHTS:
1927             {
1928                 int *fd = (int *)data;
1929                 int *target_fd = (int *)target_data;
1930                 int i, numfds = tgt_len / sizeof(int);
1931 
1932                 for (i = 0; i < numfds; i++) {
1933                     __put_user(fd[i], target_fd + i);
1934                 }
1935                 break;
1936             }
1937             case SO_TIMESTAMP:
1938             {
1939                 struct timeval *tv = (struct timeval *)data;
1940                 struct target_timeval *target_tv =
1941                     (struct target_timeval *)target_data;
1942 
1943                 if (len != sizeof(struct timeval) ||
1944                     tgt_len != sizeof(struct target_timeval)) {
1945                     goto unimplemented;
1946                 }
1947 
1948                 /* copy struct timeval to target */
1949                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1950                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1951                 break;
1952             }
1953             case SCM_CREDENTIALS:
1954             {
1955                 struct ucred *cred = (struct ucred *)data;
1956                 struct target_ucred *target_cred =
1957                     (struct target_ucred *)target_data;
1958 
1959                 __put_user(cred->pid, &target_cred->pid);
1960                 __put_user(cred->uid, &target_cred->uid);
1961                 __put_user(cred->gid, &target_cred->gid);
1962                 break;
1963             }
1964             default:
1965                 goto unimplemented;
1966             }
1967             break;
1968 
1969         case SOL_IP:
1970             switch (cmsg->cmsg_type) {
1971             case IP_TTL:
1972             {
1973                 uint32_t *v = (uint32_t *)data;
1974                 uint32_t *t_int = (uint32_t *)target_data;
1975 
1976                 if (len != sizeof(uint32_t) ||
1977                     tgt_len != sizeof(uint32_t)) {
1978                     goto unimplemented;
1979                 }
1980                 __put_user(*v, t_int);
1981                 break;
1982             }
1983             case IP_RECVERR:
1984             {
1985                 struct errhdr_t {
1986                    struct sock_extended_err ee;
1987                    struct sockaddr_in offender;
1988                 };
1989                 struct errhdr_t *errh = (struct errhdr_t *)data;
1990                 struct errhdr_t *target_errh =
1991                     (struct errhdr_t *)target_data;
1992 
1993                 if (len != sizeof(struct errhdr_t) ||
1994                     tgt_len != sizeof(struct errhdr_t)) {
1995                     goto unimplemented;
1996                 }
1997                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1998                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1999                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2000                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2001                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2002                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2003                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2004                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2005                     (void *) &errh->offender, sizeof(errh->offender));
2006                 break;
2007             }
2008             case IP_PKTINFO:
2009             {
2010                 struct in_pktinfo *pkti = data;
2011                 struct target_in_pktinfo *target_pi = target_data;
2012 
2013                 __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
2014                 target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
2015                 target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
2016                 break;
2017             }
2018             default:
2019                 goto unimplemented;
2020             }
2021             break;
2022 
2023         case SOL_IPV6:
2024             switch (cmsg->cmsg_type) {
2025             case IPV6_HOPLIMIT:
2026             {
2027                 uint32_t *v = (uint32_t *)data;
2028                 uint32_t *t_int = (uint32_t *)target_data;
2029 
2030                 if (len != sizeof(uint32_t) ||
2031                     tgt_len != sizeof(uint32_t)) {
2032                     goto unimplemented;
2033                 }
2034                 __put_user(*v, t_int);
2035                 break;
2036             }
2037             case IPV6_RECVERR:
2038             {
2039                 struct errhdr6_t {
2040                    struct sock_extended_err ee;
2041                    struct sockaddr_in6 offender;
2042                 };
2043                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2044                 struct errhdr6_t *target_errh =
2045                     (struct errhdr6_t *)target_data;
2046 
2047                 if (len != sizeof(struct errhdr6_t) ||
2048                     tgt_len != sizeof(struct errhdr6_t)) {
2049                     goto unimplemented;
2050                 }
2051                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2052                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2053                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2054                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2055                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2056                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2057                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2058                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2059                     (void *) &errh->offender, sizeof(errh->offender));
2060                 break;
2061             }
2062             default:
2063                 goto unimplemented;
2064             }
2065             break;
2066 
2067         default:
2068         unimplemented:
2069             qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
2070                           cmsg->cmsg_level, cmsg->cmsg_type);
2071             memcpy(target_data, data, MIN(len, tgt_len));
2072             if (tgt_len > len) {
2073                 memset(target_data + len, 0, tgt_len - len);
2074             }
2075         }
2076 
2077         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2078         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2079         if (msg_controllen < tgt_space) {
2080             tgt_space = msg_controllen;
2081         }
2082         msg_controllen -= tgt_space;
2083         space += tgt_space;
2084         cmsg = CMSG_NXTHDR(msgh, cmsg);
2085         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2086                                          target_cmsg_start);
2087     }
2088     unlock_user(target_cmsg, target_cmsg_addr, space);
2089  the_end:
2090     target_msgh->msg_controllen = tswapal(space);
2091     return 0;
2092 }
2093 
2094 /* do_setsockopt() must return target values and target errnos. */
2095 static abi_long do_setsockopt(int sockfd, int level, int optname,
2096                               abi_ulong optval_addr, socklen_t optlen)
2097 {
2098     abi_long ret;
2099     int val;
2100 
2101     switch(level) {
2102     case SOL_TCP:
2103     case SOL_UDP:
2104         /* TCP and UDP options all take an 'int' value.  */
2105         if (optlen < sizeof(uint32_t))
2106             return -TARGET_EINVAL;
2107 
2108         if (get_user_u32(val, optval_addr))
2109             return -TARGET_EFAULT;
2110         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2111         break;
2112     case SOL_IP:
2113         switch(optname) {
2114         case IP_TOS:
2115         case IP_TTL:
2116         case IP_HDRINCL:
2117         case IP_ROUTER_ALERT:
2118         case IP_RECVOPTS:
2119         case IP_RETOPTS:
2120         case IP_PKTINFO:
2121         case IP_MTU_DISCOVER:
2122         case IP_RECVERR:
2123         case IP_RECVTTL:
2124         case IP_RECVTOS:
2125 #ifdef IP_FREEBIND
2126         case IP_FREEBIND:
2127 #endif
2128         case IP_MULTICAST_TTL:
2129         case IP_MULTICAST_LOOP:
2130             val = 0;
2131             if (optlen >= sizeof(uint32_t)) {
2132                 if (get_user_u32(val, optval_addr))
2133                     return -TARGET_EFAULT;
2134             } else if (optlen >= 1) {
2135                 if (get_user_u8(val, optval_addr))
2136                     return -TARGET_EFAULT;
2137             }
2138             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2139             break;
2140         case IP_MULTICAST_IF:
2141         case IP_ADD_MEMBERSHIP:
2142         case IP_DROP_MEMBERSHIP:
2143         {
2144             struct ip_mreqn ip_mreq;
2145             struct target_ip_mreqn *target_smreqn;
2146             int min_size;
2147 
2148             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2149                               sizeof(struct target_ip_mreq));
2150 
2151             if (optname == IP_MULTICAST_IF) {
2152                 min_size = sizeof(struct in_addr);
2153             } else {
2154                 min_size = sizeof(struct target_ip_mreq);
2155             }
2156             if (optlen < min_size ||
2157                 optlen > sizeof (struct target_ip_mreqn)) {
2158                 return -TARGET_EINVAL;
2159             }
2160 
2161             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2162             if (!target_smreqn) {
2163                 return -TARGET_EFAULT;
2164             }
2165             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2166             if (optlen >= sizeof(struct target_ip_mreq)) {
2167                 ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2168                 if (optlen >= sizeof(struct target_ip_mreqn)) {
2169                     __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
2170                     optlen = sizeof(struct ip_mreqn);
2171                 }
2172             }
2173             unlock_user(target_smreqn, optval_addr, 0);
2174             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2175             break;
2176         }
2177         case IP_BLOCK_SOURCE:
2178         case IP_UNBLOCK_SOURCE:
2179         case IP_ADD_SOURCE_MEMBERSHIP:
2180         case IP_DROP_SOURCE_MEMBERSHIP:
2181         {
2182             struct ip_mreq_source *ip_mreq_source;
2183 
2184             if (optlen != sizeof (struct target_ip_mreq_source))
2185                 return -TARGET_EINVAL;
2186 
2187             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2188             if (!ip_mreq_source) {
2189                 return -TARGET_EFAULT;
2190             }
2191             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2192             unlock_user (ip_mreq_source, optval_addr, 0);
2193             break;
2194         }
2195         default:
2196             goto unimplemented;
2197         }
2198         break;
2199     case SOL_IPV6:
2200         switch (optname) {
2201         case IPV6_MTU_DISCOVER:
2202         case IPV6_MTU:
2203         case IPV6_V6ONLY:
2204         case IPV6_RECVPKTINFO:
2205         case IPV6_UNICAST_HOPS:
2206         case IPV6_MULTICAST_HOPS:
2207         case IPV6_MULTICAST_LOOP:
2208         case IPV6_RECVERR:
2209         case IPV6_RECVHOPLIMIT:
2210         case IPV6_2292HOPLIMIT:
2211         case IPV6_CHECKSUM:
2212         case IPV6_ADDRFORM:
2213         case IPV6_2292PKTINFO:
2214         case IPV6_RECVTCLASS:
2215         case IPV6_RECVRTHDR:
2216         case IPV6_2292RTHDR:
2217         case IPV6_RECVHOPOPTS:
2218         case IPV6_2292HOPOPTS:
2219         case IPV6_RECVDSTOPTS:
2220         case IPV6_2292DSTOPTS:
2221         case IPV6_TCLASS:
2222         case IPV6_ADDR_PREFERENCES:
2223 #ifdef IPV6_RECVPATHMTU
2224         case IPV6_RECVPATHMTU:
2225 #endif
2226 #ifdef IPV6_TRANSPARENT
2227         case IPV6_TRANSPARENT:
2228 #endif
2229 #ifdef IPV6_FREEBIND
2230         case IPV6_FREEBIND:
2231 #endif
2232 #ifdef IPV6_RECVORIGDSTADDR
2233         case IPV6_RECVORIGDSTADDR:
2234 #endif
2235             val = 0;
2236             if (optlen < sizeof(uint32_t)) {
2237                 return -TARGET_EINVAL;
2238             }
2239             if (get_user_u32(val, optval_addr)) {
2240                 return -TARGET_EFAULT;
2241             }
2242             ret = get_errno(setsockopt(sockfd, level, optname,
2243                                        &val, sizeof(val)));
2244             break;
2245         case IPV6_PKTINFO:
2246         {
2247             struct in6_pktinfo pki;
2248 
2249             if (optlen < sizeof(pki)) {
2250                 return -TARGET_EINVAL;
2251             }
2252 
2253             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2254                 return -TARGET_EFAULT;
2255             }
2256 
2257             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2258 
2259             ret = get_errno(setsockopt(sockfd, level, optname,
2260                                        &pki, sizeof(pki)));
2261             break;
2262         }
2263         case IPV6_ADD_MEMBERSHIP:
2264         case IPV6_DROP_MEMBERSHIP:
2265         {
2266             struct ipv6_mreq ipv6mreq;
2267 
2268             if (optlen < sizeof(ipv6mreq)) {
2269                 return -TARGET_EINVAL;
2270             }
2271 
2272             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2273                 return -TARGET_EFAULT;
2274             }
2275 
2276             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2277 
2278             ret = get_errno(setsockopt(sockfd, level, optname,
2279                                        &ipv6mreq, sizeof(ipv6mreq)));
2280             break;
2281         }
2282         default:
2283             goto unimplemented;
2284         }
2285         break;
2286     case SOL_ICMPV6:
2287         switch (optname) {
2288         case ICMPV6_FILTER:
2289         {
2290             struct icmp6_filter icmp6f;
2291 
2292             if (optlen > sizeof(icmp6f)) {
2293                 optlen = sizeof(icmp6f);
2294             }
2295 
2296             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2297                 return -TARGET_EFAULT;
2298             }
2299 
2300             for (val = 0; val < 8; val++) {
2301                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2302             }
2303 
2304             ret = get_errno(setsockopt(sockfd, level, optname,
2305                                        &icmp6f, optlen));
2306             break;
2307         }
2308         default:
2309             goto unimplemented;
2310         }
2311         break;
2312     case SOL_RAW:
2313         switch (optname) {
2314         case ICMP_FILTER:
2315         case IPV6_CHECKSUM:
2316             /* these take a u32 value */
2317             if (optlen < sizeof(uint32_t)) {
2318                 return -TARGET_EINVAL;
2319             }
2320 
2321             if (get_user_u32(val, optval_addr)) {
2322                 return -TARGET_EFAULT;
2323             }
2324             ret = get_errno(setsockopt(sockfd, level, optname,
2325                                        &val, sizeof(val)));
2326             break;
2327 
2328         default:
2329             goto unimplemented;
2330         }
2331         break;
2332 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2333     case SOL_ALG:
2334         switch (optname) {
2335         case ALG_SET_KEY:
2336         {
2337             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2338             if (!alg_key) {
2339                 return -TARGET_EFAULT;
2340             }
2341             ret = get_errno(setsockopt(sockfd, level, optname,
2342                                        alg_key, optlen));
2343             unlock_user(alg_key, optval_addr, optlen);
2344             break;
2345         }
2346         case ALG_SET_AEAD_AUTHSIZE:
2347         {
2348             ret = get_errno(setsockopt(sockfd, level, optname,
2349                                        NULL, optlen));
2350             break;
2351         }
2352         default:
2353             goto unimplemented;
2354         }
2355         break;
2356 #endif
2357     case TARGET_SOL_SOCKET:
2358         switch (optname) {
2359         case TARGET_SO_RCVTIMEO:
2360         case TARGET_SO_SNDTIMEO:
2361         {
2362                 struct timeval tv;
2363 
2364                 if (optlen != sizeof(struct target_timeval)) {
2365                     return -TARGET_EINVAL;
2366                 }
2367 
2368                 if (copy_from_user_timeval(&tv, optval_addr)) {
2369                     return -TARGET_EFAULT;
2370                 }
2371 
2372                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2373                                 optname == TARGET_SO_RCVTIMEO ?
2374                                     SO_RCVTIMEO : SO_SNDTIMEO,
2375                                 &tv, sizeof(tv)));
2376                 return ret;
2377         }
2378         case TARGET_SO_ATTACH_FILTER:
2379         {
2380                 struct target_sock_fprog *tfprog;
2381                 struct target_sock_filter *tfilter;
2382                 struct sock_fprog fprog;
2383                 struct sock_filter *filter;
2384                 int i;
2385 
2386                 if (optlen != sizeof(*tfprog)) {
2387                     return -TARGET_EINVAL;
2388                 }
2389                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2390                     return -TARGET_EFAULT;
2391                 }
2392                 if (!lock_user_struct(VERIFY_READ, tfilter,
2393                                       tswapal(tfprog->filter), 0)) {
2394                     unlock_user_struct(tfprog, optval_addr, 1);
2395                     return -TARGET_EFAULT;
2396                 }
2397 
2398                 fprog.len = tswap16(tfprog->len);
2399                 filter = g_try_new(struct sock_filter, fprog.len);
2400                 if (filter == NULL) {
2401                     unlock_user_struct(tfilter, tfprog->filter, 1);
2402                     unlock_user_struct(tfprog, optval_addr, 1);
2403                     return -TARGET_ENOMEM;
2404                 }
2405                 for (i = 0; i < fprog.len; i++) {
2406                     filter[i].code = tswap16(tfilter[i].code);
2407                     filter[i].jt = tfilter[i].jt;
2408                     filter[i].jf = tfilter[i].jf;
2409                     filter[i].k = tswap32(tfilter[i].k);
2410                 }
2411                 fprog.filter = filter;
2412 
2413                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2414                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2415                 g_free(filter);
2416 
2417                 unlock_user_struct(tfilter, tfprog->filter, 1);
2418                 unlock_user_struct(tfprog, optval_addr, 1);
2419                 return ret;
2420         }
2421         case TARGET_SO_BINDTODEVICE:
2422         {
2423                 char *dev_ifname, *addr_ifname;
2424 
2425                 if (optlen > IFNAMSIZ - 1) {
2426                     optlen = IFNAMSIZ - 1;
2427                 }
2428                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2429                 if (!dev_ifname) {
2430                     return -TARGET_EFAULT;
2431                 }
2432                 optname = SO_BINDTODEVICE;
2433                 addr_ifname = alloca(IFNAMSIZ);
2434                 memcpy(addr_ifname, dev_ifname, optlen);
2435                 addr_ifname[optlen] = 0;
2436                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2437                                            addr_ifname, optlen));
2438                 unlock_user(dev_ifname, optval_addr, 0);
2439                 return ret;
2440         }
2441         case TARGET_SO_LINGER:
2442         {
2443                 struct linger lg;
2444                 struct target_linger *tlg;
2445 
2446                 if (optlen != sizeof(struct target_linger)) {
2447                     return -TARGET_EINVAL;
2448                 }
2449                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2450                     return -TARGET_EFAULT;
2451                 }
2452                 __get_user(lg.l_onoff, &tlg->l_onoff);
2453                 __get_user(lg.l_linger, &tlg->l_linger);
2454                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2455                                 &lg, sizeof(lg)));
2456                 unlock_user_struct(tlg, optval_addr, 0);
2457                 return ret;
2458         }
2459             /* Options with 'int' argument.  */
2460         case TARGET_SO_DEBUG:
2461                 optname = SO_DEBUG;
2462                 break;
2463         case TARGET_SO_REUSEADDR:
2464                 optname = SO_REUSEADDR;
2465                 break;
2466 #ifdef SO_REUSEPORT
2467         case TARGET_SO_REUSEPORT:
2468                 optname = SO_REUSEPORT;
2469                 break;
2470 #endif
2471         case TARGET_SO_TYPE:
2472                 optname = SO_TYPE;
2473                 break;
2474         case TARGET_SO_ERROR:
2475                 optname = SO_ERROR;
2476                 break;
2477         case TARGET_SO_DONTROUTE:
2478                 optname = SO_DONTROUTE;
2479                 break;
2480         case TARGET_SO_BROADCAST:
2481                 optname = SO_BROADCAST;
2482                 break;
2483         case TARGET_SO_SNDBUF:
2484                 optname = SO_SNDBUF;
2485                 break;
2486         case TARGET_SO_SNDBUFFORCE:
2487                 optname = SO_SNDBUFFORCE;
2488                 break;
2489         case TARGET_SO_RCVBUF:
2490                 optname = SO_RCVBUF;
2491                 break;
2492         case TARGET_SO_RCVBUFFORCE:
2493                 optname = SO_RCVBUFFORCE;
2494                 break;
2495         case TARGET_SO_KEEPALIVE:
2496                 optname = SO_KEEPALIVE;
2497                 break;
2498         case TARGET_SO_OOBINLINE:
2499                 optname = SO_OOBINLINE;
2500                 break;
2501         case TARGET_SO_NO_CHECK:
2502                 optname = SO_NO_CHECK;
2503                 break;
2504         case TARGET_SO_PRIORITY:
2505                 optname = SO_PRIORITY;
2506                 break;
2507 #ifdef SO_BSDCOMPAT
2508         case TARGET_SO_BSDCOMPAT:
2509                 optname = SO_BSDCOMPAT;
2510                 break;
2511 #endif
2512         case TARGET_SO_PASSCRED:
2513                 optname = SO_PASSCRED;
2514                 break;
2515         case TARGET_SO_PASSSEC:
2516                 optname = SO_PASSSEC;
2517                 break;
2518         case TARGET_SO_TIMESTAMP:
2519                 optname = SO_TIMESTAMP;
2520                 break;
2521         case TARGET_SO_RCVLOWAT:
2522                 optname = SO_RCVLOWAT;
2523                 break;
2524         default:
2525             goto unimplemented;
2526         }
2527         if (optlen < sizeof(uint32_t))
2528             return -TARGET_EINVAL;
2529 
2530         if (get_user_u32(val, optval_addr))
2531             return -TARGET_EFAULT;
2532         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2533         break;
2534 #ifdef SOL_NETLINK
2535     case SOL_NETLINK:
2536         switch (optname) {
2537         case NETLINK_PKTINFO:
2538         case NETLINK_ADD_MEMBERSHIP:
2539         case NETLINK_DROP_MEMBERSHIP:
2540         case NETLINK_BROADCAST_ERROR:
2541         case NETLINK_NO_ENOBUFS:
2542 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2543         case NETLINK_LISTEN_ALL_NSID:
2544         case NETLINK_CAP_ACK:
2545 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2546 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2547         case NETLINK_EXT_ACK:
2548 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2549 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2550         case NETLINK_GET_STRICT_CHK:
2551 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2552             break;
2553         default:
2554             goto unimplemented;
2555         }
2556         val = 0;
2557         if (optlen < sizeof(uint32_t)) {
2558             return -TARGET_EINVAL;
2559         }
2560         if (get_user_u32(val, optval_addr)) {
2561             return -TARGET_EFAULT;
2562         }
2563         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2564                                    sizeof(val)));
2565         break;
2566 #endif /* SOL_NETLINK */
2567     default:
2568     unimplemented:
2569         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2570                       level, optname);
2571         ret = -TARGET_ENOPROTOOPT;
2572     }
2573     return ret;
2574 }
2575 
2576 /* do_getsockopt() must return target values and target errnos. */
2577 static abi_long do_getsockopt(int sockfd, int level, int optname,
2578                               abi_ulong optval_addr, abi_ulong optlen)
2579 {
2580     abi_long ret;
2581     int len, val;
2582     socklen_t lv;
2583 
2584     switch(level) {
2585     case TARGET_SOL_SOCKET:
2586         level = SOL_SOCKET;
2587         switch (optname) {
2588         /* These don't just return a single integer */
2589         case TARGET_SO_PEERNAME:
2590             goto unimplemented;
2591         case TARGET_SO_RCVTIMEO: {
2592             struct timeval tv;
2593             socklen_t tvlen;
2594 
2595             optname = SO_RCVTIMEO;
2596 
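            /*
             * Shared path for SO_RCVTIMEO and SO_SNDTIMEO: read the
             * guest's optlen, query the host timeval, and copy it back
             * as a struct target_timeval.
             */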
2597 get_timeout:
2598             if (get_user_u32(len, optlen)) {
2599                 return -TARGET_EFAULT;
2600             }
2601             if (len < 0) {
2602                 return -TARGET_EINVAL;
2603             }
2604 
2605             tvlen = sizeof(tv);
2606             ret = get_errno(getsockopt(sockfd, level, optname,
2607                                        &tv, &tvlen));
2608             if (ret < 0) {
2609                 return ret;
2610             }
2611             if (len > sizeof(struct target_timeval)) {
2612                 len = sizeof(struct target_timeval);
2613             }
2614             if (copy_to_user_timeval(optval_addr, &tv)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             if (put_user_u32(len, optlen)) {
2618                 return -TARGET_EFAULT;
2619             }
2620             break;
2621         }
2622         case TARGET_SO_SNDTIMEO:
2623             optname = SO_SNDTIMEO;
2624             goto get_timeout;
2625         case TARGET_SO_PEERCRED: {
2626             struct ucred cr;
2627             socklen_t crlen;
2628             struct target_ucred *tcr;
2629 
2630             if (get_user_u32(len, optlen)) {
2631                 return -TARGET_EFAULT;
2632             }
2633             if (len < 0) {
2634                 return -TARGET_EINVAL;
2635             }
2636 
2637             crlen = sizeof(cr);
2638             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2639                                        &cr, &crlen));
2640             if (ret < 0) {
2641                 return ret;
2642             }
2643             if (len > crlen) {
2644                 len = crlen;
2645             }
2646             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2647                 return -TARGET_EFAULT;
2648             }
2649             __put_user(cr.pid, &tcr->pid);
2650             __put_user(cr.uid, &tcr->uid);
2651             __put_user(cr.gid, &tcr->gid);
2652             unlock_user_struct(tcr, optval_addr, 1);
2653             if (put_user_u32(len, optlen)) {
2654                 return -TARGET_EFAULT;
2655             }
2656             break;
2657         }
2658         case TARGET_SO_PEERSEC: {
2659             char *name;
2660 
2661             if (get_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             if (len < 0) {
2665                 return -TARGET_EINVAL;
2666             }
2667             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2668             if (!name) {
2669                 return -TARGET_EFAULT;
2670             }
2671             lv = len;
2672             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2673                                        name, &lv));
2674             if (put_user_u32(lv, optlen)) {
2675                 ret = -TARGET_EFAULT;
2676             }
2677             unlock_user(name, optval_addr, lv);
2678             break;
2679         }
2680         case TARGET_SO_LINGER:
2681         {
2682             struct linger lg;
2683             socklen_t lglen;
2684             struct target_linger *tlg;
2685 
2686             if (get_user_u32(len, optlen)) {
2687                 return -TARGET_EFAULT;
2688             }
2689             if (len < 0) {
2690                 return -TARGET_EINVAL;
2691             }
2692 
2693             lglen = sizeof(lg);
2694             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2695                                        &lg, &lglen));
2696             if (ret < 0) {
2697                 return ret;
2698             }
2699             if (len > lglen) {
2700                 len = lglen;
2701             }
2702             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2703                 return -TARGET_EFAULT;
2704             }
2705             __put_user(lg.l_onoff, &tlg->l_onoff);
2706             __put_user(lg.l_linger, &tlg->l_linger);
2707             unlock_user_struct(tlg, optval_addr, 1);
2708             if (put_user_u32(len, optlen)) {
2709                 return -TARGET_EFAULT;
2710             }
2711             break;
2712         }
2713         /* Options with 'int' argument.  */
2714         case TARGET_SO_DEBUG:
2715             optname = SO_DEBUG;
2716             goto int_case;
2717         case TARGET_SO_REUSEADDR:
2718             optname = SO_REUSEADDR;
2719             goto int_case;
2720 #ifdef SO_REUSEPORT
2721         case TARGET_SO_REUSEPORT:
2722             optname = SO_REUSEPORT;
2723             goto int_case;
2724 #endif
2725         case TARGET_SO_TYPE:
2726             optname = SO_TYPE;
2727             goto int_case;
2728         case TARGET_SO_ERROR:
2729             optname = SO_ERROR;
2730             goto int_case;
2731         case TARGET_SO_DONTROUTE:
2732             optname = SO_DONTROUTE;
2733             goto int_case;
2734         case TARGET_SO_BROADCAST:
2735             optname = SO_BROADCAST;
2736             goto int_case;
2737         case TARGET_SO_SNDBUF:
2738             optname = SO_SNDBUF;
2739             goto int_case;
2740         case TARGET_SO_RCVBUF:
2741             optname = SO_RCVBUF;
2742             goto int_case;
2743         case TARGET_SO_KEEPALIVE:
2744             optname = SO_KEEPALIVE;
2745             goto int_case;
2746         case TARGET_SO_OOBINLINE:
2747             optname = SO_OOBINLINE;
2748             goto int_case;
2749         case TARGET_SO_NO_CHECK:
2750             optname = SO_NO_CHECK;
2751             goto int_case;
2752         case TARGET_SO_PRIORITY:
2753             optname = SO_PRIORITY;
2754             goto int_case;
2755 #ifdef SO_BSDCOMPAT
2756         case TARGET_SO_BSDCOMPAT:
2757             optname = SO_BSDCOMPAT;
2758             goto int_case;
2759 #endif
2760         case TARGET_SO_PASSCRED:
2761             optname = SO_PASSCRED;
2762             goto int_case;
2763         case TARGET_SO_TIMESTAMP:
2764             optname = SO_TIMESTAMP;
2765             goto int_case;
2766         case TARGET_SO_RCVLOWAT:
2767             optname = SO_RCVLOWAT;
2768             goto int_case;
2769         case TARGET_SO_ACCEPTCONN:
2770             optname = SO_ACCEPTCONN;
2771             goto int_case;
2772         case TARGET_SO_PROTOCOL:
2773             optname = SO_PROTOCOL;
2774             goto int_case;
2775         case TARGET_SO_DOMAIN:
2776             optname = SO_DOMAIN;
2777             goto int_case;
2778         default:
2779             goto int_case;
2780         }
2781         break;
2782     case SOL_TCP:
2783     case SOL_UDP:
2784         /* TCP and UDP options all take an 'int' value.  */
2785     int_case:
2786         if (get_user_u32(len, optlen))
2787             return -TARGET_EFAULT;
2788         if (len < 0)
2789             return -TARGET_EINVAL;
2790         lv = sizeof(lv);
2791         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2792         if (ret < 0)
2793             return ret;
2794         switch (optname) {
2795         case SO_TYPE:
2796             val = host_to_target_sock_type(val);
2797             break;
2798         case SO_ERROR:
2799             val = host_to_target_errno(val);
2800             break;
2801         }
2802         if (len > lv)
2803             len = lv;
2804         if (len == 4) {
2805             if (put_user_u32(val, optval_addr))
2806                 return -TARGET_EFAULT;
2807         } else {
2808             if (put_user_u8(val, optval_addr))
2809                 return -TARGET_EFAULT;
2810         }
2811         if (put_user_u32(len, optlen))
2812             return -TARGET_EFAULT;
2813         break;
2814     case SOL_IP:
2815         switch(optname) {
2816         case IP_TOS:
2817         case IP_TTL:
2818         case IP_HDRINCL:
2819         case IP_ROUTER_ALERT:
2820         case IP_RECVOPTS:
2821         case IP_RETOPTS:
2822         case IP_PKTINFO:
2823         case IP_MTU_DISCOVER:
2824         case IP_RECVERR:
2825         case IP_RECVTOS:
2826 #ifdef IP_FREEBIND
2827         case IP_FREEBIND:
2828 #endif
2829         case IP_MULTICAST_TTL:
2830         case IP_MULTICAST_LOOP:
2831             if (get_user_u32(len, optlen))
2832                 return -TARGET_EFAULT;
2833             if (len < 0)
2834                 return -TARGET_EINVAL;
2835             lv = sizeof(lv);
2836             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2837             if (ret < 0)
2838                 return ret;
2839             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2840                 len = 1;
2841                 if (put_user_u32(len, optlen)
2842                     || put_user_u8(val, optval_addr))
2843                     return -TARGET_EFAULT;
2844             } else {
2845                 if (len > sizeof(int))
2846                     len = sizeof(int);
2847                 if (put_user_u32(len, optlen)
2848                     || put_user_u32(val, optval_addr))
2849                     return -TARGET_EFAULT;
2850             }
2851             break;
2852         default:
2853             ret = -TARGET_ENOPROTOOPT;
2854             break;
2855         }
2856         break;
2857     case SOL_IPV6:
2858         switch (optname) {
2859         case IPV6_MTU_DISCOVER:
2860         case IPV6_MTU:
2861         case IPV6_V6ONLY:
2862         case IPV6_RECVPKTINFO:
2863         case IPV6_UNICAST_HOPS:
2864         case IPV6_MULTICAST_HOPS:
2865         case IPV6_MULTICAST_LOOP:
2866         case IPV6_RECVERR:
2867         case IPV6_RECVHOPLIMIT:
2868         case IPV6_2292HOPLIMIT:
2869         case IPV6_CHECKSUM:
2870         case IPV6_ADDRFORM:
2871         case IPV6_2292PKTINFO:
2872         case IPV6_RECVTCLASS:
2873         case IPV6_RECVRTHDR:
2874         case IPV6_2292RTHDR:
2875         case IPV6_RECVHOPOPTS:
2876         case IPV6_2292HOPOPTS:
2877         case IPV6_RECVDSTOPTS:
2878         case IPV6_2292DSTOPTS:
2879         case IPV6_TCLASS:
2880         case IPV6_ADDR_PREFERENCES:
2881 #ifdef IPV6_RECVPATHMTU
2882         case IPV6_RECVPATHMTU:
2883 #endif
2884 #ifdef IPV6_TRANSPARENT
2885         case IPV6_TRANSPARENT:
2886 #endif
2887 #ifdef IPV6_FREEBIND
2888         case IPV6_FREEBIND:
2889 #endif
2890 #ifdef IPV6_RECVORIGDSTADDR
2891         case IPV6_RECVORIGDSTADDR:
2892 #endif
2893             if (get_user_u32(len, optlen))
2894                 return -TARGET_EFAULT;
2895             if (len < 0)
2896                 return -TARGET_EINVAL;
2897             lv = sizeof(lv);
2898             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2899             if (ret < 0)
2900                 return ret;
2901             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2902                 len = 1;
2903                 if (put_user_u32(len, optlen)
2904                     || put_user_u8(val, optval_addr))
2905                     return -TARGET_EFAULT;
2906             } else {
2907                 if (len > sizeof(int))
2908                     len = sizeof(int);
2909                 if (put_user_u32(len, optlen)
2910                     || put_user_u32(val, optval_addr))
2911                     return -TARGET_EFAULT;
2912             }
2913             break;
2914         default:
2915             ret = -TARGET_ENOPROTOOPT;
2916             break;
2917         }
2918         break;
2919 #ifdef SOL_NETLINK
2920     case SOL_NETLINK:
2921         switch (optname) {
2922         case NETLINK_PKTINFO:
2923         case NETLINK_BROADCAST_ERROR:
2924         case NETLINK_NO_ENOBUFS:
2925 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2926         case NETLINK_LISTEN_ALL_NSID:
2927         case NETLINK_CAP_ACK:
2928 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2929 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2930         case NETLINK_EXT_ACK:
2931 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2932 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2933         case NETLINK_GET_STRICT_CHK:
2934 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2935             if (get_user_u32(len, optlen)) {
2936                 return -TARGET_EFAULT;
2937             }
2938             if (len != sizeof(val)) {
2939                 return -TARGET_EINVAL;
2940             }
2941             lv = len;
2942             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2943             if (ret < 0) {
2944                 return ret;
2945             }
2946             if (put_user_u32(lv, optlen)
2947                 || put_user_u32(val, optval_addr)) {
2948                 return -TARGET_EFAULT;
2949             }
2950             break;
2951 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2952         case NETLINK_LIST_MEMBERSHIPS:
2953         {
2954             uint32_t *results;
2955             int i;
2956             if (get_user_u32(len, optlen)) {
2957                 return -TARGET_EFAULT;
2958             }
2959             if (len < 0) {
2960                 return -TARGET_EINVAL;
2961             }
2962             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2963             if (!results && len > 0) {
2964                 return -TARGET_EFAULT;
2965             }
2966             lv = len;
2967             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2968             if (ret < 0) {
2969                 unlock_user(results, optval_addr, 0);
2970                 return ret;
2971             }
2972             /* swap host endianness to target endianness. */
2973             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2974                 results[i] = tswap32(results[i]);
2975             }
2976             if (put_user_u32(lv, optlen)) {
2977                 return -TARGET_EFAULT;
2978             }
2979             unlock_user(results, optval_addr, len);
2980             break;
2981         }
2982 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2983         default:
2984             goto unimplemented;
2985         }
2986         break;
2987 #endif /* SOL_NETLINK */
2988     default:
2989     unimplemented:
2990         qemu_log_mask(LOG_UNIMP,
2991                       "getsockopt level=%d optname=%d not yet supported\n",
2992                       level, optname);
2993         ret = -TARGET_EOPNOTSUPP;
2994         break;
2995     }
2996     return ret;
2997 }
2998 
2999 /* Convert target low/high pair representing file offset into the host
3000  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3001  * as the kernel doesn't handle them either.
3002  */
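/*
 * For example, a 32-bit guest on a 64-bit host passing tlow=0x89abcdef
 * and thigh=0x01234567 yields the 64-bit offset 0x0123456789abcdef,
 * returned as *hlow=0x0123456789abcdef and *hhigh=0.
 */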
3003 static void target_to_host_low_high(abi_ulong tlow,
3004                                     abi_ulong thigh,
3005                                     unsigned long *hlow,
3006                                     unsigned long *hhigh)
3007 {
3008     uint64_t off = tlow |
3009         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3010         TARGET_LONG_BITS / 2;
3011 
3012     *hlow = off;
3013     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3014 }
3015 
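/*
 * Build a host iovec array from the guest iovec array at 'target_addr'
 * with 'count' entries, locking each buffer into host memory.  'count'
 * must not exceed IOV_MAX and the total length is capped at max_len.
 * A bad first buffer is a fault; later bad buffers become zero-length
 * entries so the syscall performs a partial transfer instead.  On error
 * NULL is returned with errno set.
 */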
3016 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3017                                 abi_ulong count, int copy)
3018 {
3019     struct target_iovec *target_vec;
3020     struct iovec *vec;
3021     abi_ulong total_len, max_len;
3022     int i;
3023     int err = 0;
3024     bool bad_address = false;
3025 
3026     if (count == 0) {
3027         errno = 0;
3028         return NULL;
3029     }
3030     if (count > IOV_MAX) {
3031         errno = EINVAL;
3032         return NULL;
3033     }
3034 
3035     vec = g_try_new0(struct iovec, count);
3036     if (vec == NULL) {
3037         errno = ENOMEM;
3038         return NULL;
3039     }
3040 
3041     target_vec = lock_user(VERIFY_READ, target_addr,
3042                            count * sizeof(struct target_iovec), 1);
3043     if (target_vec == NULL) {
3044         err = EFAULT;
3045         goto fail2;
3046     }
3047 
3048     /* ??? If host page size > target page size, this will result in a
3049        value larger than what we can actually support.  */
3050     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3051     total_len = 0;
3052 
3053     for (i = 0; i < count; i++) {
3054         abi_ulong base = tswapal(target_vec[i].iov_base);
3055         abi_long len = tswapal(target_vec[i].iov_len);
3056 
3057         if (len < 0) {
3058             err = EINVAL;
3059             goto fail;
3060         } else if (len == 0) {
3061             /* Zero length pointer is ignored.  */
3062             vec[i].iov_base = 0;
3063         } else {
3064             vec[i].iov_base = lock_user(type, base, len, copy);
3065             /* If the first buffer pointer is bad, this is a fault.  But
3066              * subsequent bad buffers will result in a partial write; this
3067              * is realized by filling the vector with null pointers and
3068              * zero lengths. */
3069             if (!vec[i].iov_base) {
3070                 if (i == 0) {
3071                     err = EFAULT;
3072                     goto fail;
3073                 } else {
3074                     bad_address = true;
3075                 }
3076             }
3077             if (bad_address) {
3078                 len = 0;
3079             }
3080             if (len > max_len - total_len) {
3081                 len = max_len - total_len;
3082             }
3083         }
3084         vec[i].iov_len = len;
3085         total_len += len;
3086     }
3087 
3088     unlock_user(target_vec, target_addr, 0);
3089     return vec;
3090 
3091  fail:
3092     while (--i >= 0) {
3093         if (tswapal(target_vec[i].iov_len) > 0) {
3094             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3095         }
3096     }
3097     unlock_user(target_vec, target_addr, 0);
3098  fail2:
3099     g_free(vec);
3100     errno = err;
3101     return NULL;
3102 }
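
/*
 * Annotation (usage note, not part of the original source): callers such as
 * do_sendrecvmsg_locked() hand the locked vector straight to host
 * readv()/writev()-style calls.  If, say, the second of three target buffers
 * is unmapped, lock_iovec() keeps the entries before it, forces the bad
 * entry and every later entry to iov_len = 0, and the host call then makes a
 * partial transfer (matching the kernel's partial read/write semantics)
 * rather than failing the whole request with EFAULT.
 */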
3103 
3104 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3105                          abi_ulong count, int copy)
3106 {
3107     struct target_iovec *target_vec;
3108     int i;
3109 
3110     target_vec = lock_user(VERIFY_READ, target_addr,
3111                            count * sizeof(struct target_iovec), 1);
3112     if (target_vec) {
3113         for (i = 0; i < count; i++) {
3114             abi_ulong base = tswapal(target_vec[i].iov_base);
3115             abi_long len = tswapal(target_vec[i].iov_len);
3116             if (len < 0) {
3117                 break;
3118             }
3119             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3120         }
3121         unlock_user(target_vec, target_addr, 0);
3122     }
3123 
3124     g_free(vec);
3125 }
3126 
3127 static inline int target_to_host_sock_type(int *type)
3128 {
3129     int host_type = 0;
3130     int target_type = *type;
3131 
3132     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3133     case TARGET_SOCK_DGRAM:
3134         host_type = SOCK_DGRAM;
3135         break;
3136     case TARGET_SOCK_STREAM:
3137         host_type = SOCK_STREAM;
3138         break;
3139     default:
3140         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3141         break;
3142     }
3143     if (target_type & TARGET_SOCK_CLOEXEC) {
3144 #if defined(SOCK_CLOEXEC)
3145         host_type |= SOCK_CLOEXEC;
3146 #else
3147         return -TARGET_EINVAL;
3148 #endif
3149     }
3150     if (target_type & TARGET_SOCK_NONBLOCK) {
3151 #if defined(SOCK_NONBLOCK)
3152         host_type |= SOCK_NONBLOCK;
3153 #elif !defined(O_NONBLOCK)
3154         return -TARGET_EINVAL;
3155 #endif
3156     }
3157     *type = host_type;
3158     return 0;
3159 }
3160 
3161 /* Try to emulate socket type flags after socket creation.  */
3162 static int sock_flags_fixup(int fd, int target_type)
3163 {
3164 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3165     if (target_type & TARGET_SOCK_NONBLOCK) {
3166         int flags = fcntl(fd, F_GETFL);
3167         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3168             close(fd);
3169             return -TARGET_EINVAL;
3170         }
3171     }
3172 #endif
3173     return fd;
3174 }
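
/*
 * Annotation (illustrative flow, not part of the original source): a guest
 * socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0) first goes through
 * target_to_host_sock_type(), which maps the base type and, on hosts that
 * lack SOCK_NONBLOCK but have O_NONBLOCK, leaves the flag to be emulated;
 * sock_flags_fixup() then sets O_NONBLOCK on the new fd with fcntl(), so
 * the guest still observes a non-blocking socket.
 */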
3175 
3176 /* do_socket() Must return target values and target errnos. */
3177 static abi_long do_socket(int domain, int type, int protocol)
3178 {
3179     int target_type = type;
3180     int ret;
3181 
3182     ret = target_to_host_sock_type(&type);
3183     if (ret) {
3184         return ret;
3185     }
3186 
3187     if (domain == PF_NETLINK && !(
3188 #ifdef CONFIG_RTNETLINK
3189          protocol == NETLINK_ROUTE ||
3190 #endif
3191          protocol == NETLINK_KOBJECT_UEVENT ||
3192          protocol == NETLINK_AUDIT)) {
3193         return -TARGET_EPROTONOSUPPORT;
3194     }
3195 
3196     if (domain == AF_PACKET ||
3197         (domain == AF_INET && type == SOCK_PACKET)) {
3198         protocol = tswap16(protocol);
3199     }
3200 
3201     ret = get_errno(socket(domain, type, protocol));
3202     if (ret >= 0) {
3203         ret = sock_flags_fixup(ret, target_type);
3204         if (type == SOCK_PACKET) {
3205             /* Handle an obsolete case:
3206              * if the socket type is SOCK_PACKET, bind by name.
3207              */
3208             fd_trans_register(ret, &target_packet_trans);
3209         } else if (domain == PF_NETLINK) {
3210             switch (protocol) {
3211 #ifdef CONFIG_RTNETLINK
3212             case NETLINK_ROUTE:
3213                 fd_trans_register(ret, &target_netlink_route_trans);
3214                 break;
3215 #endif
3216             case NETLINK_KOBJECT_UEVENT:
3217                 /* nothing to do: messages are strings */
3218                 break;
3219             case NETLINK_AUDIT:
3220                 fd_trans_register(ret, &target_netlink_audit_trans);
3221                 break;
3222             default:
3223                 g_assert_not_reached();
3224             }
3225         }
3226     }
3227     return ret;
3228 }
3229 
3230 /* do_bind() Must return target values and target errnos. */
3231 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3232                         socklen_t addrlen)
3233 {
3234     void *addr;
3235     abi_long ret;
3236 
3237     if ((int)addrlen < 0) {
3238         return -TARGET_EINVAL;
3239     }
3240 
3241     addr = alloca(addrlen+1);
3242 
3243     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3244     if (ret)
3245         return ret;
3246 
3247     return get_errno(bind(sockfd, addr, addrlen));
3248 }
3249 
3250 /* do_connect() Must return target values and target errnos. */
3251 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3252                            socklen_t addrlen)
3253 {
3254     void *addr;
3255     abi_long ret;
3256 
3257     if ((int)addrlen < 0) {
3258         return -TARGET_EINVAL;
3259     }
3260 
3261     addr = alloca(addrlen+1);
3262 
3263     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3264     if (ret)
3265         return ret;
3266 
3267     return get_errno(safe_connect(sockfd, addr, addrlen));
3268 }
3269 
3270 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3271 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3272                                       int flags, int send)
3273 {
3274     abi_long ret, len;
3275     struct msghdr msg;
3276     abi_ulong count;
3277     struct iovec *vec;
3278     abi_ulong target_vec;
3279 
3280     if (msgp->msg_name) {
3281         msg.msg_namelen = tswap32(msgp->msg_namelen);
3282         msg.msg_name = alloca(msg.msg_namelen+1);
3283         ret = target_to_host_sockaddr(fd, msg.msg_name,
3284                                       tswapal(msgp->msg_name),
3285                                       msg.msg_namelen);
3286         if (ret == -TARGET_EFAULT) {
3287             /* For connected sockets msg_name and msg_namelen must
3288              * be ignored, so returning EFAULT immediately is wrong.
3289              * Instead, pass a bad msg_name to the host kernel, and
3290              * let it decide whether to return EFAULT or not.
3291              */
3292             msg.msg_name = (void *)-1;
3293         } else if (ret) {
3294             goto out2;
3295         }
3296     } else {
3297         msg.msg_name = NULL;
3298         msg.msg_namelen = 0;
3299     }
3300     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3301     msg.msg_control = alloca(msg.msg_controllen);
3302     memset(msg.msg_control, 0, msg.msg_controllen);
3303 
3304     msg.msg_flags = tswap32(msgp->msg_flags);
3305 
3306     count = tswapal(msgp->msg_iovlen);
3307     target_vec = tswapal(msgp->msg_iov);
3308 
3309     if (count > IOV_MAX) {
3310         /* sendmsg/recvmsg returns a different errno for this condition than
3311          * readv/writev, so we must catch it here before lock_iovec() does.
3312          */
3313         ret = -TARGET_EMSGSIZE;
3314         goto out2;
3315     }
3316 
3317     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3318                      target_vec, count, send);
3319     if (vec == NULL) {
3320         ret = -host_to_target_errno(errno);
3321         /* allow sending a packet without any iov, e.g. with the MSG_MORE flag */
3322         if (!send || ret) {
3323             goto out2;
3324         }
3325     }
3326     msg.msg_iovlen = count;
3327     msg.msg_iov = vec;
3328 
3329     if (send) {
3330         if (fd_trans_target_to_host_data(fd)) {
3331             void *host_msg;
3332 
3333             host_msg = g_malloc(msg.msg_iov->iov_len);
3334             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3335             ret = fd_trans_target_to_host_data(fd)(host_msg,
3336                                                    msg.msg_iov->iov_len);
3337             if (ret >= 0) {
3338                 msg.msg_iov->iov_base = host_msg;
3339                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3340             }
3341             g_free(host_msg);
3342         } else {
3343             ret = target_to_host_cmsg(&msg, msgp);
3344             if (ret == 0) {
3345                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3346             }
3347         }
3348     } else {
3349         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3350         if (!is_error(ret)) {
3351             len = ret;
3352             if (fd_trans_host_to_target_data(fd)) {
3353                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3354                                                MIN(msg.msg_iov->iov_len, len));
3355             }
3356             if (!is_error(ret)) {
3357                 ret = host_to_target_cmsg(msgp, &msg);
3358             }
3359             if (!is_error(ret)) {
3360                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3361                 msgp->msg_flags = tswap32(msg.msg_flags);
3362                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3363                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3364                                     msg.msg_name, msg.msg_namelen);
3365                     if (ret) {
3366                         goto out;
3367                     }
3368                 }
3369 
3370                 ret = len;
3371             }
3372         }
3373     }
3374 
3375 out:
3376     if (vec) {
3377         unlock_iovec(vec, target_vec, count, !send);
3378     }
3379 out2:
3380     return ret;
3381 }
3382 
3383 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3384                                int flags, int send)
3385 {
3386     abi_long ret;
3387     struct target_msghdr *msgp;
3388 
3389     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3390                           msgp,
3391                           target_msg,
3392                           send ? 1 : 0)) {
3393         return -TARGET_EFAULT;
3394     }
3395     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3396     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3397     return ret;
3398 }
3399 
3400 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3401  * so it might not have this *mmsg-specific flag either.
3402  */
3403 #ifndef MSG_WAITFORONE
3404 #define MSG_WAITFORONE 0x10000
3405 #endif
3406 
3407 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3408                                 unsigned int vlen, unsigned int flags,
3409                                 int send)
3410 {
3411     struct target_mmsghdr *mmsgp;
3412     abi_long ret = 0;
3413     int i;
3414 
3415     if (vlen > UIO_MAXIOV) {
3416         vlen = UIO_MAXIOV;
3417     }
3418 
3419     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3420     if (!mmsgp) {
3421         return -TARGET_EFAULT;
3422     }
3423 
3424     for (i = 0; i < vlen; i++) {
3425         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3426         if (is_error(ret)) {
3427             break;
3428         }
3429         mmsgp[i].msg_len = tswap32(ret);
3430         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3431         if (flags & MSG_WAITFORONE) {
3432             flags |= MSG_DONTWAIT;
3433         }
3434     }
3435 
3436     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3437 
3438     /* Return the number of datagrams sent or received if we transferred
3439      * any at all; otherwise return the error.
3440      */
3441     if (i) {
3442         return i;
3443     }
3444     return ret;
3445 }
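
/*
 * Annotation (behaviour note, not part of the original source): as with the
 * kernel's sendmmsg()/recvmmsg(), an error on the first datagram is reported
 * directly, while an error after at least one successful datagram yields the
 * partial count and the error only surfaces on the guest's next call.  With
 * MSG_WAITFORONE, the first completed datagram switches the remaining
 * iterations to MSG_DONTWAIT.
 */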
3446 
3447 /* do_accept4() Must return target values and target errnos. */
3448 static abi_long do_accept4(int fd, abi_ulong target_addr,
3449                            abi_ulong target_addrlen_addr, int flags)
3450 {
3451     socklen_t addrlen, ret_addrlen;
3452     void *addr;
3453     abi_long ret;
3454     int host_flags;
3455 
3456     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3457         return -TARGET_EINVAL;
3458     }
3459 
3460     host_flags = 0;
3461     if (flags & TARGET_SOCK_NONBLOCK) {
3462         host_flags |= SOCK_NONBLOCK;
3463     }
3464     if (flags & TARGET_SOCK_CLOEXEC) {
3465         host_flags |= SOCK_CLOEXEC;
3466     }
3467 
3468     if (target_addr == 0) {
3469         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3470     }
3471 
3472     /* Linux returns EFAULT if the addrlen pointer is invalid */
3473     if (get_user_u32(addrlen, target_addrlen_addr))
3474         return -TARGET_EFAULT;
3475 
3476     if ((int)addrlen < 0) {
3477         return -TARGET_EINVAL;
3478     }
3479 
3480     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3481         return -TARGET_EFAULT;
3482     }
3483 
3484     addr = alloca(addrlen);
3485 
3486     ret_addrlen = addrlen;
3487     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3488     if (!is_error(ret)) {
3489         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3490         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3491             ret = -TARGET_EFAULT;
3492         }
3493     }
3494     return ret;
3495 }
3496 
3497 /* do_getpeername() Must return target values and target errnos. */
3498 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3499                                abi_ulong target_addrlen_addr)
3500 {
3501     socklen_t addrlen, ret_addrlen;
3502     void *addr;
3503     abi_long ret;
3504 
3505     if (get_user_u32(addrlen, target_addrlen_addr))
3506         return -TARGET_EFAULT;
3507 
3508     if ((int)addrlen < 0) {
3509         return -TARGET_EINVAL;
3510     }
3511 
3512     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3513         return -TARGET_EFAULT;
3514     }
3515 
3516     addr = alloca(addrlen);
3517 
3518     ret_addrlen = addrlen;
3519     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3520     if (!is_error(ret)) {
3521         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3522         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3523             ret = -TARGET_EFAULT;
3524         }
3525     }
3526     return ret;
3527 }
3528 
3529 /* do_getsockname() Must return target values and target errnos. */
3530 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3531                                abi_ulong target_addrlen_addr)
3532 {
3533     socklen_t addrlen, ret_addrlen;
3534     void *addr;
3535     abi_long ret;
3536 
3537     if (get_user_u32(addrlen, target_addrlen_addr))
3538         return -TARGET_EFAULT;
3539 
3540     if ((int)addrlen < 0) {
3541         return -TARGET_EINVAL;
3542     }
3543 
3544     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3545         return -TARGET_EFAULT;
3546     }
3547 
3548     addr = alloca(addrlen);
3549 
3550     ret_addrlen = addrlen;
3551     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3552     if (!is_error(ret)) {
3553         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3554         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3555             ret = -TARGET_EFAULT;
3556         }
3557     }
3558     return ret;
3559 }
3560 
3561 /* do_socketpair() Must return target values and target errnos. */
3562 static abi_long do_socketpair(int domain, int type, int protocol,
3563                               abi_ulong target_tab_addr)
3564 {
3565     int tab[2];
3566     abi_long ret;
3567 
3568     target_to_host_sock_type(&type);
3569 
3570     ret = get_errno(socketpair(domain, type, protocol, tab));
3571     if (!is_error(ret)) {
3572         if (put_user_s32(tab[0], target_tab_addr)
3573             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3574             ret = -TARGET_EFAULT;
3575     }
3576     return ret;
3577 }
3578 
3579 /* do_sendto() Must return target values and target errnos. */
3580 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3581                           abi_ulong target_addr, socklen_t addrlen)
3582 {
3583     void *addr;
3584     void *host_msg = NULL;
3585     void *copy_msg = NULL;
3586     abi_long ret;
3587 
3588     if ((int)addrlen < 0) {
3589         return -TARGET_EINVAL;
3590     }
3591 
3592     if (len != 0) {
3593         host_msg = lock_user(VERIFY_READ, msg, len, 1);
3594         if (!host_msg) {
3595             return -TARGET_EFAULT;
3596         }
3597         if (fd_trans_target_to_host_data(fd)) {
3598             copy_msg = host_msg;
3599             host_msg = g_malloc(len);
3600             memcpy(host_msg, copy_msg, len);
3601             ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3602             if (ret < 0) {
3603                 goto fail;
3604             }
3605         }
3606     }
3607     if (target_addr) {
3608         addr = alloca(addrlen+1);
3609         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3610         if (ret) {
3611             goto fail;
3612         }
3613         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3614     } else {
3615         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3616     }
3617 fail:
3618     if (copy_msg) {
3619         g_free(host_msg);
3620         host_msg = copy_msg;
3621     }
3622     unlock_user(host_msg, msg, 0);
3623     return ret;
3624 }
3625 
3626 /* do_recvfrom() Must return target values and target errnos. */
3627 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3628                             abi_ulong target_addr,
3629                             abi_ulong target_addrlen)
3630 {
3631     socklen_t addrlen, ret_addrlen;
3632     void *addr;
3633     void *host_msg;
3634     abi_long ret;
3635 
3636     if (!msg) {
3637         host_msg = NULL;
3638     } else {
3639         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3640         if (!host_msg) {
3641             return -TARGET_EFAULT;
3642         }
3643     }
3644     if (target_addr) {
3645         if (get_user_u32(addrlen, target_addrlen)) {
3646             ret = -TARGET_EFAULT;
3647             goto fail;
3648         }
3649         if ((int)addrlen < 0) {
3650             ret = -TARGET_EINVAL;
3651             goto fail;
3652         }
3653         addr = alloca(addrlen);
3654         ret_addrlen = addrlen;
3655         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3656                                       addr, &ret_addrlen));
3657     } else {
3658         addr = NULL; /* To keep compiler quiet.  */
3659         addrlen = 0; /* To keep compiler quiet.  */
3660         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3661     }
3662     if (!is_error(ret)) {
3663         if (fd_trans_host_to_target_data(fd)) {
3664             abi_long trans;
3665             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3666             if (is_error(trans)) {
3667                 ret = trans;
3668                 goto fail;
3669             }
3670         }
3671         if (target_addr) {
3672             host_to_target_sockaddr(target_addr, addr,
3673                                     MIN(addrlen, ret_addrlen));
3674             if (put_user_u32(ret_addrlen, target_addrlen)) {
3675                 ret = -TARGET_EFAULT;
3676                 goto fail;
3677             }
3678         }
3679         unlock_user(host_msg, msg, len);
3680     } else {
3681 fail:
3682         unlock_user(host_msg, msg, 0);
3683     }
3684     return ret;
3685 }
3686 
3687 #ifdef TARGET_NR_socketcall
3688 /* do_socketcall() must return target values and target errnos. */
3689 static abi_long do_socketcall(int num, abi_ulong vptr)
3690 {
3691     static const unsigned nargs[] = { /* number of arguments per operation */
3692         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3693         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3694         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3695         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3696         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3697         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3698         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3699         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3700         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3701         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3702         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3703         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3704         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3705         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3706         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3707         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3708         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3709         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3710         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3711         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3712     };
3713     abi_long a[6]; /* max 6 args */
3714     unsigned i;
3715 
3716     /* check the range of the first argument num */
3717     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3718     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3719         return -TARGET_EINVAL;
3720     }
3721     /* ensure we have space for args */
3722     if (nargs[num] > ARRAY_SIZE(a)) {
3723         return -TARGET_EINVAL;
3724     }
3725     /* collect the arguments in a[] according to nargs[] */
3726     for (i = 0; i < nargs[num]; ++i) {
3727         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3728             return -TARGET_EFAULT;
3729         }
3730     }
3731     /* now that we have the args, invoke the appropriate underlying function */
3732     switch (num) {
3733     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3734         return do_socket(a[0], a[1], a[2]);
3735     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3736         return do_bind(a[0], a[1], a[2]);
3737     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3738         return do_connect(a[0], a[1], a[2]);
3739     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3740         return get_errno(listen(a[0], a[1]));
3741     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3742         return do_accept4(a[0], a[1], a[2], 0);
3743     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3744         return do_getsockname(a[0], a[1], a[2]);
3745     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3746         return do_getpeername(a[0], a[1], a[2]);
3747     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3748         return do_socketpair(a[0], a[1], a[2], a[3]);
3749     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3750         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3751     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3752         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3753     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3754         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3755     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3756         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3757     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3758         return get_errno(shutdown(a[0], a[1]));
3759     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3760         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3761     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3762         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3763     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3764         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3765     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3766         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3767     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3768         return do_accept4(a[0], a[1], a[2], a[3]);
3769     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3770         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3771     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3772         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3773     default:
3774         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3775         return -TARGET_EINVAL;
3776     }
3777 }
3778 #endif
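
/*
 * Annotation (worked example, not part of the original source): a guest
 * socketcall(TARGET_SYS_CONNECT, vptr) has nargs[TARGET_SYS_CONNECT] == 3,
 * so do_socketcall() fetches three abi_longs (fd, addr, addrlen) from vptr
 * with get_user_ual() and dispatches to do_connect(a[0], a[1], a[2]);
 * call numbers outside the table are logged via LOG_UNIMP and fail with
 * -TARGET_EINVAL.
 */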
3779 
3780 #ifndef TARGET_SEMID64_DS
3781 /* asm-generic version of this struct */
3782 struct target_semid64_ds
3783 {
3784   struct target_ipc_perm sem_perm;
3785   abi_ulong sem_otime;
3786 #if TARGET_ABI_BITS == 32
3787   abi_ulong __unused1;
3788 #endif
3789   abi_ulong sem_ctime;
3790 #if TARGET_ABI_BITS == 32
3791   abi_ulong __unused2;
3792 #endif
3793   abi_ulong sem_nsems;
3794   abi_ulong __unused3;
3795   abi_ulong __unused4;
3796 };
3797 #endif
3798 
3799 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3800                                                abi_ulong target_addr)
3801 {
3802     struct target_ipc_perm *target_ip;
3803     struct target_semid64_ds *target_sd;
3804 
3805     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3806         return -TARGET_EFAULT;
3807     target_ip = &(target_sd->sem_perm);
3808     host_ip->__key = tswap32(target_ip->__key);
3809     host_ip->uid = tswap32(target_ip->uid);
3810     host_ip->gid = tswap32(target_ip->gid);
3811     host_ip->cuid = tswap32(target_ip->cuid);
3812     host_ip->cgid = tswap32(target_ip->cgid);
3813 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3814     host_ip->mode = tswap32(target_ip->mode);
3815 #else
3816     host_ip->mode = tswap16(target_ip->mode);
3817 #endif
3818 #if defined(TARGET_PPC)
3819     host_ip->__seq = tswap32(target_ip->__seq);
3820 #else
3821     host_ip->__seq = tswap16(target_ip->__seq);
3822 #endif
3823     unlock_user_struct(target_sd, target_addr, 0);
3824     return 0;
3825 }
3826 
3827 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3828                                                struct ipc_perm *host_ip)
3829 {
3830     struct target_ipc_perm *target_ip;
3831     struct target_semid64_ds *target_sd;
3832 
3833     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3834         return -TARGET_EFAULT;
3835     target_ip = &(target_sd->sem_perm);
3836     target_ip->__key = tswap32(host_ip->__key);
3837     target_ip->uid = tswap32(host_ip->uid);
3838     target_ip->gid = tswap32(host_ip->gid);
3839     target_ip->cuid = tswap32(host_ip->cuid);
3840     target_ip->cgid = tswap32(host_ip->cgid);
3841 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3842     target_ip->mode = tswap32(host_ip->mode);
3843 #else
3844     target_ip->mode = tswap16(host_ip->mode);
3845 #endif
3846 #if defined(TARGET_PPC)
3847     target_ip->__seq = tswap32(host_ip->__seq);
3848 #else
3849     target_ip->__seq = tswap16(host_ip->__seq);
3850 #endif
3851     unlock_user_struct(target_sd, target_addr, 1);
3852     return 0;
3853 }
3854 
3855 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3856                                                abi_ulong target_addr)
3857 {
3858     struct target_semid64_ds *target_sd;
3859 
3860     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3861         return -TARGET_EFAULT;
3862     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3863         return -TARGET_EFAULT;
3864     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3865     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3866     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3867     unlock_user_struct(target_sd, target_addr, 0);
3868     return 0;
3869 }
3870 
3871 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3872                                                struct semid_ds *host_sd)
3873 {
3874     struct target_semid64_ds *target_sd;
3875 
3876     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3877         return -TARGET_EFAULT;
3878     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3879         return -TARGET_EFAULT;
3880     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3881     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3882     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3883     unlock_user_struct(target_sd, target_addr, 1);
3884     return 0;
3885 }
3886 
3887 struct target_seminfo {
3888     int semmap;
3889     int semmni;
3890     int semmns;
3891     int semmnu;
3892     int semmsl;
3893     int semopm;
3894     int semume;
3895     int semusz;
3896     int semvmx;
3897     int semaem;
3898 };
3899 
3900 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3901                                               struct seminfo *host_seminfo)
3902 {
3903     struct target_seminfo *target_seminfo;
3904     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3905         return -TARGET_EFAULT;
3906     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3907     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3908     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3909     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3910     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3911     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3912     __put_user(host_seminfo->semume, &target_seminfo->semume);
3913     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3914     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3915     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3916     unlock_user_struct(target_seminfo, target_addr, 1);
3917     return 0;
3918 }
3919 
3920 union semun {
3921 	int val;
3922 	struct semid_ds *buf;
3923 	unsigned short *array;
3924 	struct seminfo *__buf;
3925 };
3926 
3927 union target_semun {
3928 	int val;
3929 	abi_ulong buf;
3930 	abi_ulong array;
3931 	abi_ulong __buf;
3932 };
3933 
3934 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3935                                                abi_ulong target_addr)
3936 {
3937     int nsems;
3938     unsigned short *array;
3939     union semun semun;
3940     struct semid_ds semid_ds;
3941     int i, ret;
3942 
3943     semun.buf = &semid_ds;
3944 
3945     ret = semctl(semid, 0, IPC_STAT, semun);
3946     if (ret == -1)
3947         return get_errno(ret);
3948 
3949     nsems = semid_ds.sem_nsems;
3950 
3951     *host_array = g_try_new(unsigned short, nsems);
3952     if (!*host_array) {
3953         return -TARGET_ENOMEM;
3954     }
3955     array = lock_user(VERIFY_READ, target_addr,
3956                       nsems*sizeof(unsigned short), 1);
3957     if (!array) {
3958         g_free(*host_array);
3959         return -TARGET_EFAULT;
3960     }
3961 
3962     for(i=0; i<nsems; i++) {
3963         __get_user((*host_array)[i], &array[i]);
3964     }
3965     unlock_user(array, target_addr, 0);
3966 
3967     return 0;
3968 }
3969 
3970 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3971                                                unsigned short **host_array)
3972 {
3973     int nsems;
3974     unsigned short *array;
3975     union semun semun;
3976     struct semid_ds semid_ds;
3977     int i, ret;
3978 
3979     semun.buf = &semid_ds;
3980 
3981     ret = semctl(semid, 0, IPC_STAT, semun);
3982     if (ret == -1)
3983         return get_errno(ret);
3984 
3985     nsems = semid_ds.sem_nsems;
3986 
3987     array = lock_user(VERIFY_WRITE, target_addr,
3988                       nsems*sizeof(unsigned short), 0);
3989     if (!array)
3990         return -TARGET_EFAULT;
3991 
3992     for(i=0; i<nsems; i++) {
3993         __put_user((*host_array)[i], &array[i]);
3994     }
3995     g_free(*host_array);
3996     unlock_user(array, target_addr, 1);
3997 
3998     return 0;
3999 }
4000 
4001 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4002                                  abi_ulong target_arg)
4003 {
4004     union target_semun target_su = { .buf = target_arg };
4005     union semun arg;
4006     struct semid_ds dsarg;
4007     unsigned short *array = NULL;
4008     struct seminfo seminfo;
4009     abi_long ret = -TARGET_EINVAL;
4010     abi_long err;
4011     cmd &= 0xff;
4012 
4013     switch( cmd ) {
4014 	case GETVAL:
4015 	case SETVAL:
4016             /* In 64 bit cross-endian situations, we will erroneously pick up
4017              * the wrong half of the union for the "val" element.  To rectify
4018              * this, the entire 8-byte structure is byteswapped, followed by
4019              * a swap of the 4 byte val field. In other cases, the data is
4020              * already in proper host byte order. */
4021             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4022                 target_su.buf = tswapal(target_su.buf);
4023                 arg.val = tswap32(target_su.val);
4024             } else {
4025                 arg.val = target_su.val;
4026             }
4027             ret = get_errno(semctl(semid, semnum, cmd, arg));
4028             break;
4029 	case GETALL:
4030 	case SETALL:
4031             err = target_to_host_semarray(semid, &array, target_su.array);
4032             if (err)
4033                 return err;
4034             arg.array = array;
4035             ret = get_errno(semctl(semid, semnum, cmd, arg));
4036             err = host_to_target_semarray(semid, target_su.array, &array);
4037             if (err)
4038                 return err;
4039             break;
4040 	case IPC_STAT:
4041 	case IPC_SET:
4042 	case SEM_STAT:
4043             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4044             if (err)
4045                 return err;
4046             arg.buf = &dsarg;
4047             ret = get_errno(semctl(semid, semnum, cmd, arg));
4048             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4049             if (err)
4050                 return err;
4051             break;
4052 	case IPC_INFO:
4053 	case SEM_INFO:
4054             arg.__buf = &seminfo;
4055             ret = get_errno(semctl(semid, semnum, cmd, arg));
4056             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4057             if (err)
4058                 return err;
4059             break;
4060 	case IPC_RMID:
4061 	case GETPID:
4062 	case GETNCNT:
4063 	case GETZCNT:
4064             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4065             break;
4066     }
4067 
4068     return ret;
4069 }
4070 
4071 struct target_sembuf {
4072     unsigned short sem_num;
4073     short sem_op;
4074     short sem_flg;
4075 };
4076 
4077 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4078                                              abi_ulong target_addr,
4079                                              unsigned nsops)
4080 {
4081     struct target_sembuf *target_sembuf;
4082     int i;
4083 
4084     target_sembuf = lock_user(VERIFY_READ, target_addr,
4085                               nsops*sizeof(struct target_sembuf), 1);
4086     if (!target_sembuf)
4087         return -TARGET_EFAULT;
4088 
4089     for(i=0; i<nsops; i++) {
4090         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4091         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4092         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4093     }
4094 
4095     unlock_user(target_sembuf, target_addr, 0);
4096 
4097     return 0;
4098 }
4099 
4100 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4101     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4102 
4103 /*
4104  * This macro is required to handle the s390 variant, which passes the
4105  * arguments in a different order than the default.
4106  */
4107 #ifdef __s390x__
4108 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4109   (__nsops), (__timeout), (__sops)
4110 #else
4111 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4112   (__nsops), 0, (__sops), (__timeout)
4113 #endif
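
/*
 * Annotation (expansion example, not part of the original source): with the
 * generic layout, safe_ipc(IPCOP_semtimedop, semid,
 * SEMTIMEDOP_IPC_ARGS(nsops, sops, ts)) expands to
 * safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, ts), while the
 * five-argument s390x variant expands to
 * safe_ipc(IPCOP_semtimedop, semid, nsops, ts, sops), where ts stands for
 * the timeout pointer passed at the call site below.
 */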
4114 
4115 static inline abi_long do_semtimedop(int semid,
4116                                      abi_long ptr,
4117                                      unsigned nsops,
4118                                      abi_long timeout, bool time64)
4119 {
4120     struct sembuf *sops;
4121     struct timespec ts, *pts = NULL;
4122     abi_long ret;
4123 
4124     if (timeout) {
4125         pts = &ts;
4126         if (time64) {
4127             if (target_to_host_timespec64(pts, timeout)) {
4128                 return -TARGET_EFAULT;
4129             }
4130         } else {
4131             if (target_to_host_timespec(pts, timeout)) {
4132                 return -TARGET_EFAULT;
4133             }
4134         }
4135     }
4136 
4137     if (nsops > TARGET_SEMOPM) {
4138         return -TARGET_E2BIG;
4139     }
4140 
4141     sops = g_new(struct sembuf, nsops);
4142 
4143     if (target_to_host_sembuf(sops, ptr, nsops)) {
4144         g_free(sops);
4145         return -TARGET_EFAULT;
4146     }
4147 
4148     ret = -TARGET_ENOSYS;
4149 #ifdef __NR_semtimedop
4150     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4151 #endif
4152 #ifdef __NR_ipc
4153     if (ret == -TARGET_ENOSYS) {
4154         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4155                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4156     }
4157 #endif
4158     g_free(sops);
4159     return ret;
4160 }
4161 #endif
4162 
4163 struct target_msqid_ds
4164 {
4165     struct target_ipc_perm msg_perm;
4166     abi_ulong msg_stime;
4167 #if TARGET_ABI_BITS == 32
4168     abi_ulong __unused1;
4169 #endif
4170     abi_ulong msg_rtime;
4171 #if TARGET_ABI_BITS == 32
4172     abi_ulong __unused2;
4173 #endif
4174     abi_ulong msg_ctime;
4175 #if TARGET_ABI_BITS == 32
4176     abi_ulong __unused3;
4177 #endif
4178     abi_ulong __msg_cbytes;
4179     abi_ulong msg_qnum;
4180     abi_ulong msg_qbytes;
4181     abi_ulong msg_lspid;
4182     abi_ulong msg_lrpid;
4183     abi_ulong __unused4;
4184     abi_ulong __unused5;
4185 };
4186 
4187 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4188                                                abi_ulong target_addr)
4189 {
4190     struct target_msqid_ds *target_md;
4191 
4192     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4193         return -TARGET_EFAULT;
4194     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4195         return -TARGET_EFAULT;
4196     host_md->msg_stime = tswapal(target_md->msg_stime);
4197     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4198     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4199     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4200     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4201     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4202     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4203     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4204     unlock_user_struct(target_md, target_addr, 0);
4205     return 0;
4206 }
4207 
4208 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4209                                                struct msqid_ds *host_md)
4210 {
4211     struct target_msqid_ds *target_md;
4212 
4213     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4214         return -TARGET_EFAULT;
4215     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4216         return -TARGET_EFAULT;
4217     target_md->msg_stime = tswapal(host_md->msg_stime);
4218     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4219     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4220     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4221     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4222     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4223     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4224     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4225     unlock_user_struct(target_md, target_addr, 1);
4226     return 0;
4227 }
4228 
4229 struct target_msginfo {
4230     int msgpool;
4231     int msgmap;
4232     int msgmax;
4233     int msgmnb;
4234     int msgmni;
4235     int msgssz;
4236     int msgtql;
4237     unsigned short int msgseg;
4238 };
4239 
4240 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4241                                               struct msginfo *host_msginfo)
4242 {
4243     struct target_msginfo *target_msginfo;
4244     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4245         return -TARGET_EFAULT;
4246     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4247     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4248     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4249     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4250     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4251     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4252     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4253     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4254     unlock_user_struct(target_msginfo, target_addr, 1);
4255     return 0;
4256 }
4257 
4258 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4259 {
4260     struct msqid_ds dsarg;
4261     struct msginfo msginfo;
4262     abi_long ret = -TARGET_EINVAL;
4263 
4264     cmd &= 0xff;
4265 
4266     switch (cmd) {
4267     case IPC_STAT:
4268     case IPC_SET:
4269     case MSG_STAT:
4270         if (target_to_host_msqid_ds(&dsarg,ptr))
4271             return -TARGET_EFAULT;
4272         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4273         if (host_to_target_msqid_ds(ptr,&dsarg))
4274             return -TARGET_EFAULT;
4275         break;
4276     case IPC_RMID:
4277         ret = get_errno(msgctl(msgid, cmd, NULL));
4278         break;
4279     case IPC_INFO:
4280     case MSG_INFO:
4281         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4282         if (host_to_target_msginfo(ptr, &msginfo))
4283             return -TARGET_EFAULT;
4284         break;
4285     }
4286 
4287     return ret;
4288 }
4289 
4290 struct target_msgbuf {
4291     abi_long mtype;
4292     char	mtext[1];
4293 };
4294 
4295 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4296                                  ssize_t msgsz, int msgflg)
4297 {
4298     struct target_msgbuf *target_mb;
4299     struct msgbuf *host_mb;
4300     abi_long ret = 0;
4301 
4302     if (msgsz < 0) {
4303         return -TARGET_EINVAL;
4304     }
4305 
4306     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4307         return -TARGET_EFAULT;
4308     host_mb = g_try_malloc(msgsz + sizeof(long));
4309     if (!host_mb) {
4310         unlock_user_struct(target_mb, msgp, 0);
4311         return -TARGET_ENOMEM;
4312     }
4313     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4314     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4315     ret = -TARGET_ENOSYS;
4316 #ifdef __NR_msgsnd
4317     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4318 #endif
4319 #ifdef __NR_ipc
4320     if (ret == -TARGET_ENOSYS) {
4321 #ifdef __s390x__
4322         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4323                                  host_mb));
4324 #else
4325         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4326                                  host_mb, 0));
4327 #endif
4328     }
4329 #endif
4330     g_free(host_mb);
4331     unlock_user_struct(target_mb, msgp, 0);
4332 
4333     return ret;
4334 }
4335 
4336 #ifdef __NR_ipc
4337 #if defined(__sparc__)
4338 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4339 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4340 #elif defined(__s390x__)
4341 /* The s390 sys_ipc variant has only five parameters.  */
4342 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4343     ((long int[]){(long int)__msgp, __msgtyp})
4344 #else
4345 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4346     ((long int[]){(long int)__msgp, __msgtyp}), 0
4347 #endif
4348 #endif
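
/*
 * Annotation (expansion example, not part of the original source): on the
 * generic ABI, MSGRCV_ARGS(host_mb, msgtyp) packs the message pointer and
 * type into a temporary long[2] compound literal followed by a trailing 0,
 * matching the historical sys_ipc(MSGRCV) "kludge" convention; SPARC passes
 * the two values directly, and s390x drops the trailing argument because
 * its sys_ipc takes only five parameters.
 */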
4349 
4350 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4351                                  ssize_t msgsz, abi_long msgtyp,
4352                                  int msgflg)
4353 {
4354     struct target_msgbuf *target_mb;
4355     char *target_mtext;
4356     struct msgbuf *host_mb;
4357     abi_long ret = 0;
4358 
4359     if (msgsz < 0) {
4360         return -TARGET_EINVAL;
4361     }
4362 
4363     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4364         return -TARGET_EFAULT;
4365 
4366     host_mb = g_try_malloc(msgsz + sizeof(long));
4367     if (!host_mb) {
4368         ret = -TARGET_ENOMEM;
4369         goto end;
4370     }
4371     ret = -TARGET_ENOSYS;
4372 #ifdef __NR_msgrcv
4373     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4374 #endif
4375 #ifdef __NR_ipc
4376     if (ret == -TARGET_ENOSYS) {
4377         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4378                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4379     }
4380 #endif
4381 
4382     if (ret > 0) {
4383         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4384         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4385         if (!target_mtext) {
4386             ret = -TARGET_EFAULT;
4387             goto end;
4388         }
4389         memcpy(target_mb->mtext, host_mb->mtext, ret);
4390         unlock_user(target_mtext, target_mtext_addr, ret);
4391     }
4392 
4393     target_mb->mtype = tswapal(host_mb->mtype);
4394 
4395 end:
4396     if (target_mb)
4397         unlock_user_struct(target_mb, msgp, 1);
4398     g_free(host_mb);
4399     return ret;
4400 }
4401 
4402 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4403                                                abi_ulong target_addr)
4404 {
4405     struct target_shmid_ds *target_sd;
4406 
4407     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4408         return -TARGET_EFAULT;
4409     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4410         return -TARGET_EFAULT;
4411     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4412     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4413     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4414     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4415     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4416     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4417     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4418     unlock_user_struct(target_sd, target_addr, 0);
4419     return 0;
4420 }
4421 
4422 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4423                                                struct shmid_ds *host_sd)
4424 {
4425     struct target_shmid_ds *target_sd;
4426 
4427     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4428         return -TARGET_EFAULT;
4429     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4430         return -TARGET_EFAULT;
4431     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4432     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4433     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4434     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4435     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4436     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4437     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4438     unlock_user_struct(target_sd, target_addr, 1);
4439     return 0;
4440 }
4441 
4442 struct  target_shminfo {
4443     abi_ulong shmmax;
4444     abi_ulong shmmin;
4445     abi_ulong shmmni;
4446     abi_ulong shmseg;
4447     abi_ulong shmall;
4448 };
4449 
4450 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4451                                               struct shminfo *host_shminfo)
4452 {
4453     struct target_shminfo *target_shminfo;
4454     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4455         return -TARGET_EFAULT;
4456     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4457     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4458     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4459     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4460     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4461     unlock_user_struct(target_shminfo, target_addr, 1);
4462     return 0;
4463 }
4464 
4465 struct target_shm_info {
4466     int used_ids;
4467     abi_ulong shm_tot;
4468     abi_ulong shm_rss;
4469     abi_ulong shm_swp;
4470     abi_ulong swap_attempts;
4471     abi_ulong swap_successes;
4472 };
4473 
4474 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4475                                                struct shm_info *host_shm_info)
4476 {
4477     struct target_shm_info *target_shm_info;
4478     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4479         return -TARGET_EFAULT;
4480     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4481     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4482     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4483     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4484     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4485     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4486     unlock_user_struct(target_shm_info, target_addr, 1);
4487     return 0;
4488 }
4489 
4490 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4491 {
4492     struct shmid_ds dsarg;
4493     struct shminfo shminfo;
4494     struct shm_info shm_info;
4495     abi_long ret = -TARGET_EINVAL;
4496 
4497     cmd &= 0xff;
4498 
4499     switch(cmd) {
4500     case IPC_STAT:
4501     case IPC_SET:
4502     case SHM_STAT:
4503         if (target_to_host_shmid_ds(&dsarg, buf))
4504             return -TARGET_EFAULT;
4505         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4506         if (host_to_target_shmid_ds(buf, &dsarg))
4507             return -TARGET_EFAULT;
4508         break;
4509     case IPC_INFO:
4510         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4511         if (host_to_target_shminfo(buf, &shminfo))
4512             return -TARGET_EFAULT;
4513         break;
4514     case SHM_INFO:
4515         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4516         if (host_to_target_shm_info(buf, &shm_info))
4517             return -TARGET_EFAULT;
4518         break;
4519     case IPC_RMID:
4520     case SHM_LOCK:
4521     case SHM_UNLOCK:
4522         ret = get_errno(shmctl(shmid, cmd, NULL));
4523         break;
4524     }
4525 
4526     return ret;
4527 }
4528 
4529 #ifdef TARGET_NR_ipc
4530 /* ??? This only works with linear mappings.  */
4531 /* do_ipc() must return target values and target errnos. */
4532 static abi_long do_ipc(CPUArchState *cpu_env,
4533                        unsigned int call, abi_long first,
4534                        abi_long second, abi_long third,
4535                        abi_long ptr, abi_long fifth)
4536 {
4537     int version;
4538     abi_long ret = 0;
4539 
4540     version = call >> 16;
4541     call &= 0xffff;
4542 
4543     switch (call) {
4544     case IPCOP_semop:
4545         ret = do_semtimedop(first, ptr, second, 0, false);
4546         break;
4547     case IPCOP_semtimedop:
4548     /*
4549      * The s390 sys_ipc variant has only five parameters instead of six
4550      * (as in the default variant); the only difference is the handling of
4551      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4552      * to a struct timespec whereas the generic variant uses the fifth parameter.
4553      */
4554 #if defined(TARGET_S390X)
4555         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4556 #else
4557         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4558 #endif
4559         break;
4560 
4561     case IPCOP_semget:
4562         ret = get_errno(semget(first, second, third));
4563         break;
4564 
4565     case IPCOP_semctl: {
4566         /* The semun argument to semctl is passed by value, so dereference the
4567          * ptr argument. */
4568         abi_ulong atptr;
4569         get_user_ual(atptr, ptr);
4570         ret = do_semctl(first, second, third, atptr);
4571         break;
4572     }
4573 
4574     case IPCOP_msgget:
4575         ret = get_errno(msgget(first, second));
4576         break;
4577 
4578     case IPCOP_msgsnd:
4579         ret = do_msgsnd(first, ptr, second, third);
4580         break;
4581 
4582     case IPCOP_msgctl:
4583         ret = do_msgctl(first, second, ptr);
4584         break;
4585 
4586     case IPCOP_msgrcv:
4587         switch (version) {
4588         case 0:
4589             {
4590                 struct target_ipc_kludge {
4591                     abi_long msgp;
4592                     abi_long msgtyp;
4593                 } *tmp;
4594 
4595                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4596                     ret = -TARGET_EFAULT;
4597                     break;
4598                 }
4599 
4600                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4601 
4602                 unlock_user_struct(tmp, ptr, 0);
4603                 break;
4604             }
4605         default:
4606             ret = do_msgrcv(first, ptr, second, fifth, third);
4607         }
4608         break;
4609 
4610     case IPCOP_shmat:
4611         switch (version) {
4612         default:
4613         {
4614             abi_ulong raddr;
4615             raddr = target_shmat(cpu_env, first, ptr, second);
4616             if (is_error(raddr))
4617                 return get_errno(raddr);
4618             if (put_user_ual(raddr, third))
4619                 return -TARGET_EFAULT;
4620             break;
4621         }
4622         case 1:
4623             ret = -TARGET_EINVAL;
4624             break;
4625         }
4626 	break;
4627     case IPCOP_shmdt:
4628         ret = target_shmdt(ptr);
4629 	break;
4630 
4631     case IPCOP_shmget:
4632 	/* IPC_* flag values are the same on all linux platforms */
4633 	ret = get_errno(shmget(first, second, third));
4634 	break;
4635 
4636 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4637     case IPCOP_shmctl:
4638         ret = do_shmctl(first, second, ptr);
4639         break;
4640     default:
4641         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4642                       call, version);
4643 	ret = -TARGET_ENOSYS;
4644 	break;
4645     }
4646     return ret;
4647 }
4648 #endif
4649 
4650 /* kernel structure types definitions */
4651 
4652 #define STRUCT(name, ...) STRUCT_ ## name,
4653 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4654 enum {
4655 #include "syscall_types.h"
4656 STRUCT_MAX
4657 };
4658 #undef STRUCT
4659 #undef STRUCT_SPECIAL
4660 
4661 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4662 #define STRUCT_SPECIAL(name)
4663 #include "syscall_types.h"
4664 #undef STRUCT
4665 #undef STRUCT_SPECIAL
4666 
4667 #define MAX_STRUCT_SIZE 4096
4668 
4669 #ifdef CONFIG_FIEMAP
4670 /* So fiemap access checks don't overflow on 32 bit systems.
4671  * This is very slightly smaller than the limit imposed by
4672  * the underlying kernel.
4673  */
4674 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4675                             / sizeof(struct fiemap_extent))
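
/*
 * Added note (illustration, not in the original source): the cap above keeps
 * the later size computation
 *
 *     outbufsz = sizeof(*fm)
 *              + sizeof(struct fiemap_extent) * fm->fm_extent_count;
 *
 * from wrapping a 32-bit quantity.  On typical hosts, where a
 * struct fiemap_extent is 56 bytes, this limits fm_extent_count to roughly
 * 76 million entries.
 */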
4676 
4677 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4678                                        int fd, int cmd, abi_long arg)
4679 {
4680     /* The parameter for this ioctl is a struct fiemap followed
4681      * by an array of struct fiemap_extent whose size is set
4682      * in fiemap->fm_extent_count. The array is filled in by the
4683      * ioctl.
4684      */
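    /*
     * Rough picture of that guest argument (added illustration, not part of
     * the original source):
     *
     *   arg -> struct fiemap              header, fm_extent_count = N
     *          struct fiemap_extent[N]    filled in by the kernel; on return
     *                                     fm_mapped_extents says how many of
     *                                     those slots were actually used
     */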
4685     int target_size_in, target_size_out;
4686     struct fiemap *fm;
4687     const argtype *arg_type = ie->arg_type;
4688     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4689     void *argptr, *p;
4690     abi_long ret;
4691     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4692     uint32_t outbufsz;
4693     int free_fm = 0;
4694 
4695     assert(arg_type[0] == TYPE_PTR);
4696     assert(ie->access == IOC_RW);
4697     arg_type++;
4698     target_size_in = thunk_type_size(arg_type, 0);
4699     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4700     if (!argptr) {
4701         return -TARGET_EFAULT;
4702     }
4703     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4704     unlock_user(argptr, arg, 0);
4705     fm = (struct fiemap *)buf_temp;
4706     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4707         return -TARGET_EINVAL;
4708     }
4709 
4710     outbufsz = sizeof (*fm) +
4711         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4712 
4713     if (outbufsz > MAX_STRUCT_SIZE) {
4714         /* We can't fit all the extents into the fixed size buffer.
4715          * Allocate one that is large enough and use it instead.
4716          */
4717         fm = g_try_malloc(outbufsz);
4718         if (!fm) {
4719             return -TARGET_ENOMEM;
4720         }
4721         memcpy(fm, buf_temp, sizeof(struct fiemap));
4722         free_fm = 1;
4723     }
4724     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4725     if (!is_error(ret)) {
4726         target_size_out = target_size_in;
4727         /* An extent_count of 0 means we were only counting the extents
4728          * so there are no structs to copy
4729          */
4730         if (fm->fm_extent_count != 0) {
4731             target_size_out += fm->fm_mapped_extents * extent_size;
4732         }
4733         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4734         if (!argptr) {
4735             ret = -TARGET_EFAULT;
4736         } else {
4737             /* Convert the struct fiemap */
4738             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4739             if (fm->fm_extent_count != 0) {
4740                 p = argptr + target_size_in;
4741                 /* ...and then all the struct fiemap_extents */
4742                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4743                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4744                                   THUNK_TARGET);
4745                     p += extent_size;
4746                 }
4747             }
4748             unlock_user(argptr, arg, target_size_out);
4749         }
4750     }
4751     if (free_fm) {
4752         g_free(fm);
4753     }
4754     return ret;
4755 }
4756 #endif
4757 
4758 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4759                                 int fd, int cmd, abi_long arg)
4760 {
4761     const argtype *arg_type = ie->arg_type;
4762     int target_size;
4763     void *argptr;
4764     int ret;
4765     struct ifconf *host_ifconf;
4766     uint32_t outbufsz;
4767     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4768     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4769     int target_ifreq_size;
4770     int nb_ifreq;
4771     int free_buf = 0;
4772     int i;
4773     int target_ifc_len;
4774     abi_long target_ifc_buf;
4775     int host_ifc_len;
4776     char *host_ifc_buf;
4777 
4778     assert(arg_type[0] == TYPE_PTR);
4779     assert(ie->access == IOC_RW);
4780 
4781     arg_type++;
4782     target_size = thunk_type_size(arg_type, 0);
4783 
4784     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4785     if (!argptr)
4786         return -TARGET_EFAULT;
4787     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4788     unlock_user(argptr, arg, 0);
4789 
4790     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4791     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4792     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4793 
4794     if (target_ifc_buf != 0) {
4795         target_ifc_len = host_ifconf->ifc_len;
4796         nb_ifreq = target_ifc_len / target_ifreq_size;
4797         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4798 
4799         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4800         if (outbufsz > MAX_STRUCT_SIZE) {
4801             /*
4802              * We can't fit all the ifreq entries into the fixed size buffer.
4803              * Allocate one that is large enough and use it instead.
4804              */
4805             host_ifconf = g_try_malloc(outbufsz);
4806             if (!host_ifconf) {
4807                 return -TARGET_ENOMEM;
4808             }
4809             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4810             free_buf = 1;
4811         }
4812         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4813 
4814         host_ifconf->ifc_len = host_ifc_len;
4815     } else {
4816         host_ifc_buf = NULL;
4817     }
4818     host_ifconf->ifc_buf = host_ifc_buf;
4819 
4820     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4821     if (!is_error(ret)) {
4822         /* convert host ifc_len to target ifc_len */
4823 
4824         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4825         target_ifc_len = nb_ifreq * target_ifreq_size;
4826         host_ifconf->ifc_len = target_ifc_len;
4827 
4828         /* restore target ifc_buf */
4829 
4830         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4831 
4832         /* copy struct ifconf to target user */
4833 
4834         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4835         if (!argptr)
4836             return -TARGET_EFAULT;
4837         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4838         unlock_user(argptr, arg, target_size);
4839 
4840         if (target_ifc_buf != 0) {
4841             /* copy ifreq[] to target user */
4842             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4843             for (i = 0; i < nb_ifreq ; i++) {
4844                 thunk_convert(argptr + i * target_ifreq_size,
4845                               host_ifc_buf + i * sizeof(struct ifreq),
4846                               ifreq_arg_type, THUNK_TARGET);
4847             }
4848             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4849         }
4850     }
4851 
4852     if (free_buf) {
4853         g_free(host_ifconf);
4854     }
4855 
4856     return ret;
4857 }
4858 
4859 #if defined(CONFIG_USBFS)
4860 #if HOST_LONG_BITS > 64
4861 #error USBDEVFS thunks do not support >64 bit hosts yet.
4862 #endif
4863 struct live_urb {
4864     uint64_t target_urb_adr;
4865     uint64_t target_buf_adr;
4866     char *target_buf_ptr;
4867     struct usbdevfs_urb host_urb;
4868 };
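
/*
 * Added commentary (not in the original source) on how this wrapper is used
 * below: the address of the embedded host_urb is what gets handed to the
 * host USBDEVFS_SUBMITURB ioctl, so when USBDEVFS_REAPURB later returns that
 * same pointer we can get back to the wrapper with the usual container_of
 * style arithmetic, roughly
 *
 *     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 *
 * Independently, the hash table below is keyed on the first 8 bytes of the
 * wrapper (target_urb_adr), which is why g_int64_hash/g_int64_equal can be
 * used with a pointer to the struct itself as the key.
 */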
4869 
4870 static GHashTable *usbdevfs_urb_hashtable(void)
4871 {
4872     static GHashTable *urb_hashtable;
4873 
4874     if (!urb_hashtable) {
4875         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4876     }
4877     return urb_hashtable;
4878 }
4879 
4880 static void urb_hashtable_insert(struct live_urb *urb)
4881 {
4882     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4883     g_hash_table_insert(urb_hashtable, urb, urb);
4884 }
4885 
4886 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4887 {
4888     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4889     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4890 }
4891 
4892 static void urb_hashtable_remove(struct live_urb *urb)
4893 {
4894     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4895     g_hash_table_remove(urb_hashtable, urb);
4896 }
4897 
4898 static abi_long
4899 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4900                           int fd, int cmd, abi_long arg)
4901 {
4902     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4903     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4904     struct live_urb *lurb;
4905     void *argptr;
4906     uint64_t hurb;
4907     int target_size;
4908     uintptr_t target_urb_adr;
4909     abi_long ret;
4910 
4911     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4912 
4913     memset(buf_temp, 0, sizeof(uint64_t));
4914     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4915     if (is_error(ret)) {
4916         return ret;
4917     }
4918 
4919     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4920     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4921     if (!lurb->target_urb_adr) {
4922         return -TARGET_EFAULT;
4923     }
4924     urb_hashtable_remove(lurb);
4925     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4926         lurb->host_urb.buffer_length);
4927     lurb->target_buf_ptr = NULL;
4928 
4929     /* restore the guest buffer pointer */
4930     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4931 
4932     /* update the guest urb struct */
4933     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4934     if (!argptr) {
4935         g_free(lurb);
4936         return -TARGET_EFAULT;
4937     }
4938     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4939     unlock_user(argptr, lurb->target_urb_adr, target_size);
4940 
4941     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4942     /* write back the urb handle */
4943     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4944     if (!argptr) {
4945         g_free(lurb);
4946         return -TARGET_EFAULT;
4947     }
4948 
4949     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4950     target_urb_adr = lurb->target_urb_adr;
4951     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4952     unlock_user(argptr, arg, target_size);
4953 
4954     g_free(lurb);
4955     return ret;
4956 }
4957 
4958 static abi_long
4959 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4960                              uint8_t *buf_temp __attribute__((unused)),
4961                              int fd, int cmd, abi_long arg)
4962 {
4963     struct live_urb *lurb;
4964 
4965     /* map target address back to host URB with metadata. */
4966     lurb = urb_hashtable_lookup(arg);
4967     if (!lurb) {
4968         return -TARGET_EFAULT;
4969     }
4970     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4971 }
4972 
4973 static abi_long
4974 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4975                             int fd, int cmd, abi_long arg)
4976 {
4977     const argtype *arg_type = ie->arg_type;
4978     int target_size;
4979     abi_long ret;
4980     void *argptr;
4981     int rw_dir;
4982     struct live_urb *lurb;
4983 
4984     /*
4985      * each submitted URB needs to map to a unique ID for the
4986      * kernel, and that unique ID needs to be a pointer to
4987      * host memory.  hence, we need to malloc for each URB.
4988      * isochronous transfers have a variable length struct.
4989      */
4990     arg_type++;
4991     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4992 
4993     /* construct host copy of urb and metadata */
4994     lurb = g_try_new0(struct live_urb, 1);
4995     if (!lurb) {
4996         return -TARGET_ENOMEM;
4997     }
4998 
4999     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5000     if (!argptr) {
5001         g_free(lurb);
5002         return -TARGET_EFAULT;
5003     }
5004     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5005     unlock_user(argptr, arg, 0);
5006 
5007     lurb->target_urb_adr = arg;
5008     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5009 
5010     /* buffer space used depends on endpoint type so lock the entire buffer */
5011     /* control type urbs should check the buffer contents for true direction */
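    /*
     * Added note (not in the original source): USB_DIR_IN set in the
     * endpoint address means a device-to-host transfer, i.e. the kernel will
     * write into the buffer, hence VERIFY_WRITE; otherwise the guest buffer
     * is only read.
     */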
5012     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5013     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5014         lurb->host_urb.buffer_length, 1);
5015     if (lurb->target_buf_ptr == NULL) {
5016         g_free(lurb);
5017         return -TARGET_EFAULT;
5018     }
5019 
5020     /* update buffer pointer in host copy */
5021     lurb->host_urb.buffer = lurb->target_buf_ptr;
5022 
5023     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5024     if (is_error(ret)) {
5025         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5026         g_free(lurb);
5027     } else {
5028         urb_hashtable_insert(lurb);
5029     }
5030 
5031     return ret;
5032 }
5033 #endif /* CONFIG_USBFS */
5034 
5035 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5036                             int cmd, abi_long arg)
5037 {
5038     void *argptr;
5039     struct dm_ioctl *host_dm;
5040     abi_long guest_data;
5041     uint32_t guest_data_size;
5042     int target_size;
5043     const argtype *arg_type = ie->arg_type;
5044     abi_long ret;
5045     void *big_buf = NULL;
5046     char *host_data;
5047 
5048     arg_type++;
5049     target_size = thunk_type_size(arg_type, 0);
5050     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5051     if (!argptr) {
5052         ret = -TARGET_EFAULT;
5053         goto out;
5054     }
5055     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5056     unlock_user(argptr, arg, 0);
5057 
5058     /* buf_temp is too small, so fetch things into a bigger buffer */
5059     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5060     memcpy(big_buf, buf_temp, target_size);
5061     buf_temp = big_buf;
5062     host_dm = big_buf;
5063 
5064     guest_data = arg + host_dm->data_start;
5065     if ((guest_data - arg) < 0) {
5066         ret = -TARGET_EINVAL;
5067         goto out;
5068     }
5069     guest_data_size = host_dm->data_size - host_dm->data_start;
5070     host_data = (char*)host_dm + host_dm->data_start;
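
    /*
     * Added illustration (not part of the original source) of the blob the
     * guest hands us: one allocation of data_size bytes, of which the first
     * data_start bytes are the struct dm_ioctl header itself:
     *
     *   arg                 arg + data_start           arg + data_size
     *    |  struct dm_ioctl  |  command-specific data   |
     *
     * guest_data/host_data above address the command-specific part on the
     * target and host side respectively.
     */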
5071 
5072     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5073     if (!argptr) {
5074         ret = -TARGET_EFAULT;
5075         goto out;
5076     }
5077 
5078     switch (ie->host_cmd) {
5079     case DM_REMOVE_ALL:
5080     case DM_LIST_DEVICES:
5081     case DM_DEV_CREATE:
5082     case DM_DEV_REMOVE:
5083     case DM_DEV_SUSPEND:
5084     case DM_DEV_STATUS:
5085     case DM_DEV_WAIT:
5086     case DM_TABLE_STATUS:
5087     case DM_TABLE_CLEAR:
5088     case DM_TABLE_DEPS:
5089     case DM_LIST_VERSIONS:
5090         /* no input data */
5091         break;
5092     case DM_DEV_RENAME:
5093     case DM_DEV_SET_GEOMETRY:
5094         /* data contains only strings */
5095         memcpy(host_data, argptr, guest_data_size);
5096         break;
5097     case DM_TARGET_MSG:
5098         memcpy(host_data, argptr, guest_data_size);
5099         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5100         break;
5101     case DM_TABLE_LOAD:
5102     {
5103         void *gspec = argptr;
5104         void *cur_data = host_data;
5105         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5106         int spec_size = thunk_type_size(dm_arg_type, 0);
5107         int i;
5108 
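        /*
         * Added note (not in the original source): the DM_TABLE_LOAD payload
         * is a sequence of entries, each a struct dm_target_spec immediately
         * followed by its NUL-terminated parameter string; 'next' is the
         * distance to the following entry, which is why it is recomputed for
         * the host as sizeof(*spec) + strlen(params) + 1 below.
         */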
5109         for (i = 0; i < host_dm->target_count; i++) {
5110             struct dm_target_spec *spec = cur_data;
5111             uint32_t next;
5112             int slen;
5113 
5114             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5115             slen = strlen((char*)gspec + spec_size) + 1;
5116             next = spec->next;
5117             spec->next = sizeof(*spec) + slen;
5118             strcpy((char*)&spec[1], gspec + spec_size);
5119             gspec += next;
5120             cur_data += spec->next;
5121         }
5122         break;
5123     }
5124     default:
5125         ret = -TARGET_EINVAL;
5126         unlock_user(argptr, guest_data, 0);
5127         goto out;
5128     }
5129     unlock_user(argptr, guest_data, 0);
5130 
5131     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5132     if (!is_error(ret)) {
5133         guest_data = arg + host_dm->data_start;
5134         guest_data_size = host_dm->data_size - host_dm->data_start;
5135         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5136         switch (ie->host_cmd) {
5137         case DM_REMOVE_ALL:
5138         case DM_DEV_CREATE:
5139         case DM_DEV_REMOVE:
5140         case DM_DEV_RENAME:
5141         case DM_DEV_SUSPEND:
5142         case DM_DEV_STATUS:
5143         case DM_TABLE_LOAD:
5144         case DM_TABLE_CLEAR:
5145         case DM_TARGET_MSG:
5146         case DM_DEV_SET_GEOMETRY:
5147             /* no return data */
5148             break;
5149         case DM_LIST_DEVICES:
5150         {
5151             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5152             uint32_t remaining_data = guest_data_size;
5153             void *cur_data = argptr;
5154             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5155             int nl_size = 12; /* can't use thunk_size due to alignment */
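            /*
             * Added explanation (not in the original source): 12 is
             * offsetof(struct dm_name_list, name), i.e. an 8-byte dev field
             * plus a 4-byte next field, while the thunk size would be padded
             * up to the struct's 8-byte alignment.
             */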
5156 
5157             while (1) {
5158                 uint32_t next = nl->next;
5159                 if (next) {
5160                     nl->next = nl_size + (strlen(nl->name) + 1);
5161                 }
5162                 if (remaining_data < nl->next) {
5163                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5164                     break;
5165                 }
5166                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5167                 strcpy(cur_data + nl_size, nl->name);
5168                 cur_data += nl->next;
5169                 remaining_data -= nl->next;
5170                 if (!next) {
5171                     break;
5172                 }
5173                 nl = (void*)nl + next;
5174             }
5175             break;
5176         }
5177         case DM_DEV_WAIT:
5178         case DM_TABLE_STATUS:
5179         {
5180             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5181             void *cur_data = argptr;
5182             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5183             int spec_size = thunk_type_size(dm_arg_type, 0);
5184             int i;
5185 
5186             for (i = 0; i < host_dm->target_count; i++) {
5187                 uint32_t next = spec->next;
5188                 int slen = strlen((char*)&spec[1]) + 1;
5189                 spec->next = (cur_data - argptr) + spec_size + slen;
5190                 if (guest_data_size < spec->next) {
5191                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5192                     break;
5193                 }
5194                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5195                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5196                 cur_data = argptr + spec->next;
5197                 spec = (void*)host_dm + host_dm->data_start + next;
5198             }
5199             break;
5200         }
5201         case DM_TABLE_DEPS:
5202         {
5203             void *hdata = (void*)host_dm + host_dm->data_start;
5204             int count = *(uint32_t*)hdata;
5205             uint64_t *hdev = hdata + 8;
5206             uint64_t *gdev = argptr + 8;
5207             int i;
5208 
5209             *(uint32_t*)argptr = tswap32(count);
5210             for (i = 0; i < count; i++) {
5211                 *gdev = tswap64(*hdev);
5212                 gdev++;
5213                 hdev++;
5214             }
5215             break;
5216         }
5217         case DM_LIST_VERSIONS:
5218         {
5219             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5220             uint32_t remaining_data = guest_data_size;
5221             void *cur_data = argptr;
5222             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5223             int vers_size = thunk_type_size(dm_arg_type, 0);
5224 
5225             while (1) {
5226                 uint32_t next = vers->next;
5227                 if (next) {
5228                     vers->next = vers_size + (strlen(vers->name) + 1);
5229                 }
5230                 if (remaining_data < vers->next) {
5231                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5232                     break;
5233                 }
5234                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5235                 strcpy(cur_data + vers_size, vers->name);
5236                 cur_data += vers->next;
5237                 remaining_data -= vers->next;
5238                 if (!next) {
5239                     break;
5240                 }
5241                 vers = (void*)vers + next;
5242             }
5243             break;
5244         }
5245         default:
5246             unlock_user(argptr, guest_data, 0);
5247             ret = -TARGET_EINVAL;
5248             goto out;
5249         }
5250         unlock_user(argptr, guest_data, guest_data_size);
5251 
5252         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5253         if (!argptr) {
5254             ret = -TARGET_EFAULT;
5255             goto out;
5256         }
5257         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5258         unlock_user(argptr, arg, target_size);
5259     }
5260 out:
5261     g_free(big_buf);
5262     return ret;
5263 }
5264 
5265 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5266                                int cmd, abi_long arg)
5267 {
5268     void *argptr;
5269     int target_size;
5270     const argtype *arg_type = ie->arg_type;
5271     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5272     abi_long ret;
5273 
5274     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5275     struct blkpg_partition host_part;
5276 
5277     /* Read and convert blkpg */
5278     arg_type++;
5279     target_size = thunk_type_size(arg_type, 0);
5280     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5281     if (!argptr) {
5282         ret = -TARGET_EFAULT;
5283         goto out;
5284     }
5285     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5286     unlock_user(argptr, arg, 0);
5287 
5288     switch (host_blkpg->op) {
5289     case BLKPG_ADD_PARTITION:
5290     case BLKPG_DEL_PARTITION:
5291         /* payload is struct blkpg_partition */
5292         break;
5293     default:
5294         /* Unknown opcode */
5295         ret = -TARGET_EINVAL;
5296         goto out;
5297     }
5298 
5299     /* Read and convert blkpg->data */
5300     arg = (abi_long)(uintptr_t)host_blkpg->data;
5301     target_size = thunk_type_size(part_arg_type, 0);
5302     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5303     if (!argptr) {
5304         ret = -TARGET_EFAULT;
5305         goto out;
5306     }
5307     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5308     unlock_user(argptr, arg, 0);
5309 
5310     /* Swizzle the data pointer to our local copy and call! */
5311     host_blkpg->data = &host_part;
5312     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5313 
5314 out:
5315     return ret;
5316 }
5317 
5318 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5319                                 int fd, int cmd, abi_long arg)
5320 {
5321     const argtype *arg_type = ie->arg_type;
5322     const StructEntry *se;
5323     const argtype *field_types;
5324     const int *dst_offsets, *src_offsets;
5325     int target_size;
5326     void *argptr;
5327     abi_ulong *target_rt_dev_ptr = NULL;
5328     unsigned long *host_rt_dev_ptr = NULL;
5329     abi_long ret;
5330     int i;
5331 
5332     assert(ie->access == IOC_W);
5333     assert(*arg_type == TYPE_PTR);
5334     arg_type++;
5335     assert(*arg_type == TYPE_STRUCT);
5336     target_size = thunk_type_size(arg_type, 0);
5337     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5338     if (!argptr) {
5339         return -TARGET_EFAULT;
5340     }
5341     arg_type++;
5342     assert(*arg_type == (int)STRUCT_rtentry);
5343     se = struct_entries + *arg_type++;
5344     assert(se->convert[0] == NULL);
5345     /* Convert the struct field by field so we can special-case rt_dev. */
5346     field_types = se->field_types;
5347     dst_offsets = se->field_offsets[THUNK_HOST];
5348     src_offsets = se->field_offsets[THUNK_TARGET];
5349     for (i = 0; i < se->nb_fields; i++) {
5350         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5351             assert(*field_types == TYPE_PTRVOID);
5352             target_rt_dev_ptr = argptr + src_offsets[i];
5353             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5354             if (*target_rt_dev_ptr != 0) {
5355                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5356                                                   tswapal(*target_rt_dev_ptr));
5357                 if (!*host_rt_dev_ptr) {
5358                     unlock_user(argptr, arg, 0);
5359                     return -TARGET_EFAULT;
5360                 }
5361             } else {
5362                 *host_rt_dev_ptr = 0;
5363             }
5364             field_types++;
5365             continue;
5366         }
5367         field_types = thunk_convert(buf_temp + dst_offsets[i],
5368                                     argptr + src_offsets[i],
5369                                     field_types, THUNK_HOST);
5370     }
5371     unlock_user(argptr, arg, 0);
5372 
5373     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5374 
5375     assert(host_rt_dev_ptr != NULL);
5376     assert(target_rt_dev_ptr != NULL);
5377     if (*host_rt_dev_ptr != 0) {
5378         unlock_user((void *)*host_rt_dev_ptr,
5379                     *target_rt_dev_ptr, 0);
5380     }
5381     return ret;
5382 }
5383 
5384 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5385                                      int fd, int cmd, abi_long arg)
5386 {
5387     int sig = target_to_host_signal(arg);
5388     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5389 }
5390 
5391 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5392                                     int fd, int cmd, abi_long arg)
5393 {
5394     struct timeval tv;
5395     abi_long ret;
5396 
5397     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5398     if (is_error(ret)) {
5399         return ret;
5400     }
5401 
5402     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5403         if (copy_to_user_timeval(arg, &tv)) {
5404             return -TARGET_EFAULT;
5405         }
5406     } else {
5407         if (copy_to_user_timeval64(arg, &tv)) {
5408             return -TARGET_EFAULT;
5409         }
5410     }
5411 
5412     return ret;
5413 }
5414 
5415 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5416                                       int fd, int cmd, abi_long arg)
5417 {
5418     struct timespec ts;
5419     abi_long ret;
5420 
5421     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5422     if (is_error(ret)) {
5423         return ret;
5424     }
5425 
5426     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5427         if (host_to_target_timespec(arg, &ts)) {
5428             return -TARGET_EFAULT;
5429         }
5430     } else {
5431         if (host_to_target_timespec64(arg, &ts)) {
5432             return -TARGET_EFAULT;
5433         }
5434     }
5435 
5436     return ret;
5437 }
5438 
5439 #ifdef TIOCGPTPEER
5440 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5441                                      int fd, int cmd, abi_long arg)
5442 {
5443     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5444     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5445 }
5446 #endif
5447 
5448 #ifdef HAVE_DRM_H
5449 
5450 static void unlock_drm_version(struct drm_version *host_ver,
5451                                struct target_drm_version *target_ver,
5452                                bool copy)
5453 {
5454     unlock_user(host_ver->name, target_ver->name,
5455                                 copy ? host_ver->name_len : 0);
5456     unlock_user(host_ver->date, target_ver->date,
5457                                 copy ? host_ver->date_len : 0);
5458     unlock_user(host_ver->desc, target_ver->desc,
5459                                 copy ? host_ver->desc_len : 0);
5460 }
5461 
5462 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5463                                           struct target_drm_version *target_ver)
5464 {
5465     memset(host_ver, 0, sizeof(*host_ver));
5466 
5467     __get_user(host_ver->name_len, &target_ver->name_len);
5468     if (host_ver->name_len) {
5469         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5470                                    target_ver->name_len, 0);
5471         if (!host_ver->name) {
5472             return -EFAULT;
5473         }
5474     }
5475 
5476     __get_user(host_ver->date_len, &target_ver->date_len);
5477     if (host_ver->date_len) {
5478         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5479                                    target_ver->date_len, 0);
5480         if (!host_ver->date) {
5481             goto err;
5482         }
5483     }
5484 
5485     __get_user(host_ver->desc_len, &target_ver->desc_len);
5486     if (host_ver->desc_len) {
5487         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5488                                    target_ver->desc_len, 0);
5489         if (!host_ver->desc) {
5490             goto err;
5491         }
5492     }
5493 
5494     return 0;
5495 err:
5496     unlock_drm_version(host_ver, target_ver, false);
5497     return -EFAULT;
5498 }
5499 
5500 static inline void host_to_target_drmversion(
5501                                           struct target_drm_version *target_ver,
5502                                           struct drm_version *host_ver)
5503 {
5504     __put_user(host_ver->version_major, &target_ver->version_major);
5505     __put_user(host_ver->version_minor, &target_ver->version_minor);
5506     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5507     __put_user(host_ver->name_len, &target_ver->name_len);
5508     __put_user(host_ver->date_len, &target_ver->date_len);
5509     __put_user(host_ver->desc_len, &target_ver->desc_len);
5510     unlock_drm_version(host_ver, target_ver, true);
5511 }
5512 
5513 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5514                              int fd, int cmd, abi_long arg)
5515 {
5516     struct drm_version *ver;
5517     struct target_drm_version *target_ver;
5518     abi_long ret;
5519 
5520     switch (ie->host_cmd) {
5521     case DRM_IOCTL_VERSION:
5522         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5523             return -TARGET_EFAULT;
5524         }
5525         ver = (struct drm_version *)buf_temp;
5526         ret = target_to_host_drmversion(ver, target_ver);
5527         if (!is_error(ret)) {
5528             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5529             if (is_error(ret)) {
5530                 unlock_drm_version(ver, target_ver, false);
5531             } else {
5532                 host_to_target_drmversion(target_ver, ver);
5533             }
5534         }
5535         unlock_user_struct(target_ver, arg, 0);
5536         return ret;
5537     }
5538     return -TARGET_ENOSYS;
5539 }
5540 
5541 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5542                                            struct drm_i915_getparam *gparam,
5543                                            int fd, abi_long arg)
5544 {
5545     abi_long ret;
5546     int value;
5547     struct target_drm_i915_getparam *target_gparam;
5548 
5549     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5550         return -TARGET_EFAULT;
5551     }
5552 
5553     __get_user(gparam->param, &target_gparam->param);
5554     gparam->value = &value;
5555     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5556     put_user_s32(value, target_gparam->value);
5557 
5558     unlock_user_struct(target_gparam, arg, 0);
5559     return ret;
5560 }
5561 
5562 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5563                                   int fd, int cmd, abi_long arg)
5564 {
5565     switch (ie->host_cmd) {
5566     case DRM_IOCTL_I915_GETPARAM:
5567         return do_ioctl_drm_i915_getparam(ie,
5568                                           (struct drm_i915_getparam *)buf_temp,
5569                                           fd, arg);
5570     default:
5571         return -TARGET_ENOSYS;
5572     }
5573 }
5574 
5575 #endif
5576 
5577 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5578                                         int fd, int cmd, abi_long arg)
5579 {
5580     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5581     struct tun_filter *target_filter;
5582     char *target_addr;
5583 
5584     assert(ie->access == IOC_W);
5585 
5586     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5587     if (!target_filter) {
5588         return -TARGET_EFAULT;
5589     }
5590     filter->flags = tswap16(target_filter->flags);
5591     filter->count = tswap16(target_filter->count);
5592     unlock_user(target_filter, arg, 0);
5593 
5594     if (filter->count) {
5595         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5596             MAX_STRUCT_SIZE) {
5597             return -TARGET_EFAULT;
5598         }
5599 
5600         target_addr = lock_user(VERIFY_READ,
5601                                 arg + offsetof(struct tun_filter, addr),
5602                                 filter->count * ETH_ALEN, 1);
5603         if (!target_addr) {
5604             return -TARGET_EFAULT;
5605         }
5606         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5607         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5608     }
5609 
5610     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5611 }
5612 
5613 IOCTLEntry ioctl_entries[] = {
5614 #define IOCTL(cmd, access, ...) \
5615     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5616 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5617     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5618 #define IOCTL_IGNORE(cmd) \
5619     { TARGET_ ## cmd, 0, #cmd },
5620 #include "ioctls.h"
5621     { 0, 0, },
5622 };
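
/*
 * Added illustration (not part of the original source): an entry generated
 * from ioctls.h of, say, the shape
 *
 *     IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
 *
 * would expand to
 *
 *     { TARGET_BLKROSET, BLKROSET, "BLKROSET", IOC_W, 0, { MK_PTR(TYPE_INT) } },
 *
 * i.e. target command number, host command number, name for logging, access
 * direction, no special handler, and the thunk description of the argument.
 */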
5623 
5624 /* ??? Implement proper locking for ioctls.  */
5625 /* do_ioctl() Must return target values and target errnos. */
5626 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5627 {
5628     const IOCTLEntry *ie;
5629     const argtype *arg_type;
5630     abi_long ret;
5631     uint8_t buf_temp[MAX_STRUCT_SIZE];
5632     int target_size;
5633     void *argptr;
5634 
5635     ie = ioctl_entries;
5636     for(;;) {
5637         if (ie->target_cmd == 0) {
5638             qemu_log_mask(
5639                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5640             return -TARGET_ENOTTY;
5641         }
5642         if (ie->target_cmd == cmd)
5643             break;
5644         ie++;
5645     }
5646     arg_type = ie->arg_type;
5647     if (ie->do_ioctl) {
5648         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5649     } else if (!ie->host_cmd) {
5650         /* Some architectures define BSD ioctls in their headers
5651            that are not implemented in Linux.  */
5652         return -TARGET_ENOTTY;
5653     }
5654 
5655     switch(arg_type[0]) {
5656     case TYPE_NULL:
5657         /* no argument */
5658         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5659         break;
5660     case TYPE_PTRVOID:
5661     case TYPE_INT:
5662     case TYPE_LONG:
5663     case TYPE_ULONG:
5664         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5665         break;
5666     case TYPE_PTR:
5667         arg_type++;
5668         target_size = thunk_type_size(arg_type, 0);
5669         switch(ie->access) {
5670         case IOC_R:
5671             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5672             if (!is_error(ret)) {
5673                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5674                 if (!argptr)
5675                     return -TARGET_EFAULT;
5676                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5677                 unlock_user(argptr, arg, target_size);
5678             }
5679             break;
5680         case IOC_W:
5681             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5682             if (!argptr)
5683                 return -TARGET_EFAULT;
5684             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5685             unlock_user(argptr, arg, 0);
5686             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5687             break;
5688         default:
5689         case IOC_RW:
5690             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5691             if (!argptr)
5692                 return -TARGET_EFAULT;
5693             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5694             unlock_user(argptr, arg, 0);
5695             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5696             if (!is_error(ret)) {
5697                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5698                 if (!argptr)
5699                     return -TARGET_EFAULT;
5700                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5701                 unlock_user(argptr, arg, target_size);
5702             }
5703             break;
5704         }
5705         break;
5706     default:
5707         qemu_log_mask(LOG_UNIMP,
5708                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5709                       (long)cmd, arg_type[0]);
5710         ret = -TARGET_ENOTTY;
5711         break;
5712     }
5713     return ret;
5714 }
5715 
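/*
 * Added note (not part of the original source): each bitmask_transtbl row
 * reads { target_mask, target_bits, host_mask, host_bits }.  Roughly, if
 * (value & target_mask) == target_bits then target_to_host_bitmask() ORs
 * host_bits into the result, and host_to_target_bitmask() does the reverse;
 * rows with identical masks and bits on both sides, as below, simply map a
 * flag that happens to share its name between target and host.
 */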
5716 static const bitmask_transtbl iflag_tbl[] = {
5717         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5718         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5719         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5720         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5721         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5722         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5723         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5724         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5725         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5726         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5727         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5728         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5729         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5730         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5731         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5732 };
5733 
5734 static const bitmask_transtbl oflag_tbl[] = {
5735 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5736 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5737 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5738 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5739 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5740 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5741 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5742 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5743 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5744 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5745 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5746 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5747 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5748 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5749 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5750 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5751 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5752 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5753 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5754 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5755 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5756 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5757 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5758 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5759 };
5760 
5761 static const bitmask_transtbl cflag_tbl[] = {
5762 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5763 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5764 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5765 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5766 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5767 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5768 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5769 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5770 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5771 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5772 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5773 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5774 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5775 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5776 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5777 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5778 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5779 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5780 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5781 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5782 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5783 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5784 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5785 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5786 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5787 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5788 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5789 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5790 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5791 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5792 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5793 };
5794 
5795 static const bitmask_transtbl lflag_tbl[] = {
5796   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5797   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5798   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5799   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5800   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5801   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5802   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5803   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5804   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5805   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5806   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5807   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5808   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5809   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5810   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5811   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5812 };
5813 
5814 static void target_to_host_termios (void *dst, const void *src)
5815 {
5816     struct host_termios *host = dst;
5817     const struct target_termios *target = src;
5818 
5819     host->c_iflag =
5820         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5821     host->c_oflag =
5822         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5823     host->c_cflag =
5824         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5825     host->c_lflag =
5826         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5827     host->c_line = target->c_line;
5828 
5829     memset(host->c_cc, 0, sizeof(host->c_cc));
5830     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5831     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5832     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5833     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5834     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5835     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5836     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5837     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5838     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5839     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5840     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5841     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5842     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5843     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5844     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5845     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5846     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5847 }
5848 
5849 static void host_to_target_termios (void *dst, const void *src)
5850 {
5851     struct target_termios *target = dst;
5852     const struct host_termios *host = src;
5853 
5854     target->c_iflag =
5855         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5856     target->c_oflag =
5857         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5858     target->c_cflag =
5859         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5860     target->c_lflag =
5861         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5862     target->c_line = host->c_line;
5863 
5864     memset(target->c_cc, 0, sizeof(target->c_cc));
5865     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5866     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5867     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5868     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5869     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5870     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5871     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5872     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5873     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5874     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5875     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5876     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5877     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5878     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5879     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5880     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5881     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5882 }
5883 
5884 static const StructEntry struct_termios_def = {
5885     .convert = { host_to_target_termios, target_to_host_termios },
5886     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5887     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5888     .print = print_termios,
5889 };
5890 
5891 /* If the host does not provide these bits, they may be safely discarded. */
5892 #ifndef MAP_SYNC
5893 #define MAP_SYNC 0
5894 #endif
5895 #ifndef MAP_UNINITIALIZED
5896 #define MAP_UNINITIALIZED 0
5897 #endif
5898 
5899 static const bitmask_transtbl mmap_flags_tbl[] = {
5900     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5901     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5902       MAP_ANONYMOUS, MAP_ANONYMOUS },
5903     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5904       MAP_GROWSDOWN, MAP_GROWSDOWN },
5905     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5906       MAP_DENYWRITE, MAP_DENYWRITE },
5907     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5908       MAP_EXECUTABLE, MAP_EXECUTABLE },
5909     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5910     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5911       MAP_NORESERVE, MAP_NORESERVE },
5912     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5913     /* MAP_STACK had been ignored by the kernel for quite some time.
5914        Recognize it for the target insofar as we do not want to pass
5915        it through to the host.  */
5916     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5917     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5918     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5919     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5920       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5921     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5922       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5923 };
5924 
5925 /*
5926  * Arrange for legacy / undefined architecture specific flags to be
5927  * ignored by mmap handling code.
5928  */
5929 #ifndef TARGET_MAP_32BIT
5930 #define TARGET_MAP_32BIT 0
5931 #endif
5932 #ifndef TARGET_MAP_HUGE_2MB
5933 #define TARGET_MAP_HUGE_2MB 0
5934 #endif
5935 #ifndef TARGET_MAP_HUGE_1GB
5936 #define TARGET_MAP_HUGE_1GB 0
5937 #endif
5938 
5939 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5940                         int target_flags, int fd, off_t offset)
5941 {
5942     /*
5943      * The historical set of flags that all mmap types implicitly support.
5944      */
5945     enum {
5946         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5947                                | TARGET_MAP_PRIVATE
5948                                | TARGET_MAP_FIXED
5949                                | TARGET_MAP_ANONYMOUS
5950                                | TARGET_MAP_DENYWRITE
5951                                | TARGET_MAP_EXECUTABLE
5952                                | TARGET_MAP_UNINITIALIZED
5953                                | TARGET_MAP_GROWSDOWN
5954                                | TARGET_MAP_LOCKED
5955                                | TARGET_MAP_NORESERVE
5956                                | TARGET_MAP_POPULATE
5957                                | TARGET_MAP_NONBLOCK
5958                                | TARGET_MAP_STACK
5959                                | TARGET_MAP_HUGETLB
5960                                | TARGET_MAP_32BIT
5961                                | TARGET_MAP_HUGE_2MB
5962                                | TARGET_MAP_HUGE_1GB
5963     };
5964     int host_flags;
5965 
5966     switch (target_flags & TARGET_MAP_TYPE) {
5967     case TARGET_MAP_PRIVATE:
5968         host_flags = MAP_PRIVATE;
5969         break;
5970     case TARGET_MAP_SHARED:
5971         host_flags = MAP_SHARED;
5972         break;
5973     case TARGET_MAP_SHARED_VALIDATE:
5974         /*
5975          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5976          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5977          */
5978         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5979             return -TARGET_EOPNOTSUPP;
5980         }
5981         host_flags = MAP_SHARED_VALIDATE;
5982         if (target_flags & TARGET_MAP_SYNC) {
5983             host_flags |= MAP_SYNC;
5984         }
5985         break;
5986     default:
5987         return -TARGET_EINVAL;
5988     }
5989     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5990 
5991     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5992 }
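
/*
 * Added worked example (not part of the original source): a guest call such
 * as mmap(0, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
 * reaches do_mmap() with target_flags = TARGET_MAP_PRIVATE|TARGET_MAP_ANONYMOUS;
 * the switch above picks host_flags = MAP_PRIVATE from the type bits, and the
 * bitmask table then ORs in MAP_ANONYMOUS before target_mmap() is called.
 */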
5993 
5994 /*
5995  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5996  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5997  */
5998 #if defined(TARGET_I386)
5999 
6000 /* NOTE: there is really one LDT for all the threads */
6001 static uint8_t *ldt_table;
6002 
6003 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6004 {
6005     int size;
6006     void *p;
6007 
6008     if (!ldt_table)
6009         return 0;
6010     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6011     if (size > bytecount)
6012         size = bytecount;
6013     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6014     if (!p)
6015         return -TARGET_EFAULT;
6016     /* ??? Should this be byteswapped?  */
6017     memcpy(p, ldt_table, size);
6018     unlock_user(p, ptr, size);
6019     return size;
6020 }
6021 
6022 /* XXX: add locking support */
6023 static abi_long write_ldt(CPUX86State *env,
6024                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6025 {
6026     struct target_modify_ldt_ldt_s ldt_info;
6027     struct target_modify_ldt_ldt_s *target_ldt_info;
6028     int seg_32bit, contents, read_exec_only, limit_in_pages;
6029     int seg_not_present, useable, lm;
6030     uint32_t *lp, entry_1, entry_2;
6031 
6032     if (bytecount != sizeof(ldt_info))
6033         return -TARGET_EINVAL;
6034     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6035         return -TARGET_EFAULT;
6036     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6037     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6038     ldt_info.limit = tswap32(target_ldt_info->limit);
6039     ldt_info.flags = tswap32(target_ldt_info->flags);
6040     unlock_user_struct(target_ldt_info, ptr, 0);
6041 
6042     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6043         return -TARGET_EINVAL;
6044     seg_32bit = ldt_info.flags & 1;
6045     contents = (ldt_info.flags >> 1) & 3;
6046     read_exec_only = (ldt_info.flags >> 3) & 1;
6047     limit_in_pages = (ldt_info.flags >> 4) & 1;
6048     seg_not_present = (ldt_info.flags >> 5) & 1;
6049     useable = (ldt_info.flags >> 6) & 1;
6050 #ifdef TARGET_ABI32
6051     lm = 0;
6052 #else
6053     lm = (ldt_info.flags >> 7) & 1;
6054 #endif
6055     if (contents == 3) {
6056         if (oldmode)
6057             return -TARGET_EINVAL;
6058         if (seg_not_present == 0)
6059             return -TARGET_EINVAL;
6060     }
6061     /* allocate the LDT */
6062     if (!ldt_table) {
6063         env->ldt.base = target_mmap(0,
6064                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6065                                     PROT_READ|PROT_WRITE,
6066                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6067         if (env->ldt.base == -1)
6068             return -TARGET_ENOMEM;
6069         memset(g2h_untagged(env->ldt.base), 0,
6070                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6071         env->ldt.limit = 0xffff;
6072         ldt_table = g2h_untagged(env->ldt.base);
6073     }
6074 
6075     /* NOTE: same code as Linux kernel */
6076     /* Allow LDTs to be cleared by the user. */
6077     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6078         if (oldmode ||
6079             (contents == 0		&&
6080              read_exec_only == 1	&&
6081              seg_32bit == 0		&&
6082              limit_in_pages == 0	&&
6083              seg_not_present == 1	&&
6084              useable == 0 )) {
6085             entry_1 = 0;
6086             entry_2 = 0;
6087             goto install;
6088         }
6089     }
6090 
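    /*
     * Added note (not part of the original source): the two words below use
     * the usual x86 segment descriptor encoding, roughly
     *   entry_1: base[15:0] in the high half, limit[15:0] in the low half;
     *   entry_2: base[31:24] and base[23:16], limit[19:16], plus the access
     *            and flag bits (type, S and DPL=3 via the 0x7000 constant,
     *            present, AVL, L, D/B, G).
     */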
6091     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6092         (ldt_info.limit & 0x0ffff);
6093     entry_2 = (ldt_info.base_addr & 0xff000000) |
6094         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6095         (ldt_info.limit & 0xf0000) |
6096         ((read_exec_only ^ 1) << 9) |
6097         (contents << 10) |
6098         ((seg_not_present ^ 1) << 15) |
6099         (seg_32bit << 22) |
6100         (limit_in_pages << 23) |
6101         (lm << 21) |
6102         0x7000;
6103     if (!oldmode)
6104         entry_2 |= (useable << 20);
6105 
6106     /* Install the new entry ...  */
6107 install:
6108     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6109     lp[0] = tswap32(entry_1);
6110     lp[1] = tswap32(entry_2);
6111     return 0;
6112 }
6113 
6114 /* specific and weird i386 syscalls */
6115 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6116                               unsigned long bytecount)
6117 {
6118     abi_long ret;
6119 
6120     switch (func) {
6121     case 0:
6122         ret = read_ldt(ptr, bytecount);
6123         break;
6124     case 1:
6125         ret = write_ldt(env, ptr, bytecount, 1);
6126         break;
6127     case 0x11:
6128         ret = write_ldt(env, ptr, bytecount, 0);
6129         break;
6130     default:
6131         ret = -TARGET_ENOSYS;
6132         break;
6133     }
6134     return ret;
6135 }
6136 
6137 #if defined(TARGET_ABI32)
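/*
 * set_thread_area: install a TLS descriptor into one of the guest GDT
 * TLS slots.  An entry_number of -1 asks us to pick a free slot and
 * report the chosen index back to the guest.
 */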
6138 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6139 {
6140     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6141     struct target_modify_ldt_ldt_s ldt_info;
6142     struct target_modify_ldt_ldt_s *target_ldt_info;
6143     int seg_32bit, contents, read_exec_only, limit_in_pages;
6144     int seg_not_present, useable, lm;
6145     uint32_t *lp, entry_1, entry_2;
6146     int i;
6147 
6148     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6149     if (!target_ldt_info)
6150         return -TARGET_EFAULT;
6151     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6152     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6153     ldt_info.limit = tswap32(target_ldt_info->limit);
6154     ldt_info.flags = tswap32(target_ldt_info->flags);
6155     if (ldt_info.entry_number == -1) {
6156         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6157             if (gdt_table[i] == 0) {
6158                 ldt_info.entry_number = i;
6159                 target_ldt_info->entry_number = tswap32(i);
6160                 break;
6161             }
6162         }
6163     }
6164     unlock_user_struct(target_ldt_info, ptr, 1);
6165 
6166     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6167         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6168            return -TARGET_EINVAL;
6169     seg_32bit = ldt_info.flags & 1;
6170     contents = (ldt_info.flags >> 1) & 3;
6171     read_exec_only = (ldt_info.flags >> 3) & 1;
6172     limit_in_pages = (ldt_info.flags >> 4) & 1;
6173     seg_not_present = (ldt_info.flags >> 5) & 1;
6174     useable = (ldt_info.flags >> 6) & 1;
6175 #ifdef TARGET_ABI32
6176     lm = 0;
6177 #else
6178     lm = (ldt_info.flags >> 7) & 1;
6179 #endif
6180 
6181     if (contents == 3) {
6182         if (seg_not_present == 0)
6183             return -TARGET_EINVAL;
6184     }
6185 
6186     /* NOTE: same code as Linux kernel */
6187     /* Allow LDTs to be cleared by the user. */
6188     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6189         if ((contents == 0             &&
6190              read_exec_only == 1       &&
6191              seg_32bit == 0            &&
6192              limit_in_pages == 0       &&
6193              seg_not_present == 1      &&
6194              useable == 0 )) {
6195             entry_1 = 0;
6196             entry_2 = 0;
6197             goto install;
6198         }
6199     }
6200 
6201     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6202         (ldt_info.limit & 0x0ffff);
6203     entry_2 = (ldt_info.base_addr & 0xff000000) |
6204         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6205         (ldt_info.limit & 0xf0000) |
6206         ((read_exec_only ^ 1) << 9) |
6207         (contents << 10) |
6208         ((seg_not_present ^ 1) << 15) |
6209         (seg_32bit << 22) |
6210         (limit_in_pages << 23) |
6211         (useable << 20) |
6212         (lm << 21) |
6213         0x7000;
6214 
6215     /* Install the new entry ...  */
6216 install:
6217     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6218     lp[0] = tswap32(entry_1);
6219     lp[1] = tswap32(entry_2);
6220     return 0;
6221 }
6222 
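/*
 * get_thread_area: read the descriptor for the requested TLS slot back
 * out of the guest GDT and unpack base/limit/flags into the guest
 * structure.
 */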
6223 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6224 {
6225     struct target_modify_ldt_ldt_s *target_ldt_info;
6226     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6227     uint32_t base_addr, limit, flags;
6228     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6229     int seg_not_present, useable, lm;
6230     uint32_t *lp, entry_1, entry_2;
6231 
6232     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6233     if (!target_ldt_info)
6234         return -TARGET_EFAULT;
6235     idx = tswap32(target_ldt_info->entry_number);
6236     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6237         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6238         unlock_user_struct(target_ldt_info, ptr, 1);
6239         return -TARGET_EINVAL;
6240     }
6241     lp = (uint32_t *)(gdt_table + idx);
6242     entry_1 = tswap32(lp[0]);
6243     entry_2 = tswap32(lp[1]);
6244 
6245     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6246     contents = (entry_2 >> 10) & 3;
6247     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6248     seg_32bit = (entry_2 >> 22) & 1;
6249     limit_in_pages = (entry_2 >> 23) & 1;
6250     useable = (entry_2 >> 20) & 1;
6251 #ifdef TARGET_ABI32
6252     lm = 0;
6253 #else
6254     lm = (entry_2 >> 21) & 1;
6255 #endif
6256     flags = (seg_32bit << 0) | (contents << 1) |
6257         (read_exec_only << 3) | (limit_in_pages << 4) |
6258         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6259     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6260     base_addr = (entry_1 >> 16) |
6261         (entry_2 & 0xff000000) |
6262         ((entry_2 & 0xff) << 16);
6263     target_ldt_info->base_addr = tswapal(base_addr);
6264     target_ldt_info->limit = tswap32(limit);
6265     target_ldt_info->flags = tswap32(flags);
6266     unlock_user_struct(target_ldt_info, ptr, 1);
6267     return 0;
6268 }
6269 
6270 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6271 {
6272     return -TARGET_ENOSYS;
6273 }
6274 #else
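/*
 * 64-bit arch_prctl: ARCH_SET_FS/ARCH_SET_GS load a null selector and
 * set the segment base directly; ARCH_GET_FS/ARCH_GET_GS copy the
 * current base back to guest memory.
 */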
6275 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6276 {
6277     abi_long ret = 0;
6278     abi_ulong val;
6279     int idx;
6280 
6281     switch(code) {
6282     case TARGET_ARCH_SET_GS:
6283     case TARGET_ARCH_SET_FS:
6284         if (code == TARGET_ARCH_SET_GS)
6285             idx = R_GS;
6286         else
6287             idx = R_FS;
6288         cpu_x86_load_seg(env, idx, 0);
6289         env->segs[idx].base = addr;
6290         break;
6291     case TARGET_ARCH_GET_GS:
6292     case TARGET_ARCH_GET_FS:
6293         if (code == TARGET_ARCH_GET_GS)
6294             idx = R_GS;
6295         else
6296             idx = R_FS;
6297         val = env->segs[idx].base;
6298         if (put_user(val, addr, abi_ulong))
6299             ret = -TARGET_EFAULT;
6300         break;
6301     default:
6302         ret = -TARGET_EINVAL;
6303         break;
6304     }
6305     return ret;
6306 }
6307 #endif /* defined(TARGET_ABI32) */
6308 #endif /* defined(TARGET_I386) */
6309 
6310 /*
6311  * These constants are generic.  Supply any that are missing from the host.
6312  */
6313 #ifndef PR_SET_NAME
6314 # define PR_SET_NAME    15
6315 # define PR_GET_NAME    16
6316 #endif
6317 #ifndef PR_SET_FP_MODE
6318 # define PR_SET_FP_MODE 45
6319 # define PR_GET_FP_MODE 46
6320 # define PR_FP_MODE_FR   (1 << 0)
6321 # define PR_FP_MODE_FRE  (1 << 1)
6322 #endif
6323 #ifndef PR_SVE_SET_VL
6324 # define PR_SVE_SET_VL  50
6325 # define PR_SVE_GET_VL  51
6326 # define PR_SVE_VL_LEN_MASK  0xffff
6327 # define PR_SVE_VL_INHERIT   (1 << 17)
6328 #endif
6329 #ifndef PR_PAC_RESET_KEYS
6330 # define PR_PAC_RESET_KEYS  54
6331 # define PR_PAC_APIAKEY   (1 << 0)
6332 # define PR_PAC_APIBKEY   (1 << 1)
6333 # define PR_PAC_APDAKEY   (1 << 2)
6334 # define PR_PAC_APDBKEY   (1 << 3)
6335 # define PR_PAC_APGAKEY   (1 << 4)
6336 #endif
6337 #ifndef PR_SET_TAGGED_ADDR_CTRL
6338 # define PR_SET_TAGGED_ADDR_CTRL 55
6339 # define PR_GET_TAGGED_ADDR_CTRL 56
6340 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6341 #endif
6342 #ifndef PR_SET_IO_FLUSHER
6343 # define PR_SET_IO_FLUSHER 57
6344 # define PR_GET_IO_FLUSHER 58
6345 #endif
6346 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6347 # define PR_SET_SYSCALL_USER_DISPATCH 59
6348 # define PR_SYS_DISPATCH_OFF 0
6349 # define PR_SYS_DISPATCH_ON 1
6350 # define SYSCALL_DISPATCH_FILTER_ALLOW 0
6351 # define SYSCALL_DISPATCH_FILTER_BLOCK 1
6352 #endif
6353 #ifndef PR_SME_SET_VL
6354 # define PR_SME_SET_VL  63
6355 # define PR_SME_GET_VL  64
6356 # define PR_SME_VL_LEN_MASK  0xffff
6357 # define PR_SME_VL_INHERIT   (1 << 17)
6358 #endif
6359 #ifndef PR_GET_SHADOW_STACK_STATUS
6360 # define PR_GET_SHADOW_STACK_STATUS  74
6361 # define PR_SET_SHADOW_STACK_STATUS  75
6362 # define PR_LOCK_SHADOW_STACK_STATUS 76
6363 #endif
6364 #ifndef SHADOW_STACK_SET_TOKEN
6365 # define SHADOW_STACK_SET_TOKEN  (1u << 0)
6366 #endif
6367 #ifndef SHADOW_STACK_SET_MARKER
6368 # define SHADOW_STACK_SET_MARKER (1u << 1)
6369 #endif
6370 
6371 #include "target_prctl.h"
6372 
6373 static abi_long do_prctl_inval0(CPUArchState *env)
6374 {
6375     return -TARGET_EINVAL;
6376 }
6377 
6378 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6379 {
6380     return -TARGET_EINVAL;
6381 }
6382 
6383 #ifndef do_prctl_get_fp_mode
6384 #define do_prctl_get_fp_mode do_prctl_inval0
6385 #endif
6386 #ifndef do_prctl_set_fp_mode
6387 #define do_prctl_set_fp_mode do_prctl_inval1
6388 #endif
6389 #ifndef do_prctl_sve_get_vl
6390 #define do_prctl_sve_get_vl do_prctl_inval0
6391 #endif
6392 #ifndef do_prctl_sve_set_vl
6393 #define do_prctl_sve_set_vl do_prctl_inval1
6394 #endif
6395 #ifndef do_prctl_reset_keys
6396 #define do_prctl_reset_keys do_prctl_inval1
6397 #endif
6398 #ifndef do_prctl_set_tagged_addr_ctrl
6399 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6400 #endif
6401 #ifndef do_prctl_get_tagged_addr_ctrl
6402 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6403 #endif
6404 #ifndef do_prctl_get_unalign
6405 #define do_prctl_get_unalign do_prctl_inval1
6406 #endif
6407 #ifndef do_prctl_set_unalign
6408 #define do_prctl_set_unalign do_prctl_inval1
6409 #endif
6410 #ifndef do_prctl_sme_get_vl
6411 #define do_prctl_sme_get_vl do_prctl_inval0
6412 #endif
6413 #ifndef do_prctl_sme_set_vl
6414 #define do_prctl_sme_set_vl do_prctl_inval1
6415 #endif
6416 #ifndef do_prctl_get_shadow_stack_status
6417 #define do_prctl_get_shadow_stack_status do_prctl_inval1
6418 #endif
6419 #ifndef do_prctl_set_shadow_stack_status
6420 #define do_prctl_set_shadow_stack_status do_prctl_inval1
6421 #endif
6422 #ifndef do_prctl_lock_shadow_stack_status
6423 #define do_prctl_lock_shadow_stack_status do_prctl_inval1
6424 #endif
6425 
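/*
 * PR_SET_SYSCALL_USER_DISPATCH: record the excluded region and the
 * selector address in the TaskState; a sys_dispatch_len of -1 marks
 * dispatch as switched off.
 */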
6426 static abi_long do_prctl_syscall_user_dispatch(CPUArchState *env,
6427                                                abi_ulong arg2, abi_ulong arg3,
6428                                                abi_ulong arg4, abi_ulong arg5)
6429 {
6430     CPUState *cpu = env_cpu(env);
6431     TaskState *ts = get_task_state(cpu);
6432 
6433     switch (arg2) {
6434     case PR_SYS_DISPATCH_OFF:
6435         if (arg3 || arg4 || arg5) {
6436             return -TARGET_EINVAL;
6437         }
6438         ts->sys_dispatch_len = -1;
6439         return 0;
6440     case PR_SYS_DISPATCH_ON:
6441         if (arg3 && arg3 + arg4 <= arg3) {
6442             return -TARGET_EINVAL;
6443         }
6444         if (arg5 && !access_ok(cpu, VERIFY_READ, arg5, 1)) {
6445             return -TARGET_EFAULT;
6446         }
6447         ts->sys_dispatch = arg3;
6448         ts->sys_dispatch_len = arg4;
6449         ts->sys_dispatch_selector = arg5;
6450         return 0;
6451     default:
6452         return -TARGET_EINVAL;
6453     }
6454 }
6455 
6456 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6457                          abi_long arg3, abi_long arg4, abi_long arg5)
6458 {
6459     abi_long ret;
6460 
6461     switch (option) {
6462     case PR_GET_PDEATHSIG:
6463         {
6464             int deathsig;
6465             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6466                                   arg3, arg4, arg5));
6467             if (!is_error(ret) &&
6468                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6469                 return -TARGET_EFAULT;
6470             }
6471             return ret;
6472         }
6473     case PR_SET_PDEATHSIG:
6474         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6475                                arg3, arg4, arg5));
6476     case PR_GET_NAME:
6477         {
6478             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6479             if (!name) {
6480                 return -TARGET_EFAULT;
6481             }
6482             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6483                                   arg3, arg4, arg5));
6484             unlock_user(name, arg2, 16);
6485             return ret;
6486         }
6487     case PR_SET_NAME:
6488         {
6489             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6490             if (!name) {
6491                 return -TARGET_EFAULT;
6492             }
6493             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6494                                   arg3, arg4, arg5));
6495             unlock_user(name, arg2, 0);
6496             return ret;
6497         }
6498     case PR_GET_FP_MODE:
6499         return do_prctl_get_fp_mode(env);
6500     case PR_SET_FP_MODE:
6501         return do_prctl_set_fp_mode(env, arg2);
6502     case PR_SVE_GET_VL:
6503         return do_prctl_sve_get_vl(env);
6504     case PR_SVE_SET_VL:
6505         return do_prctl_sve_set_vl(env, arg2);
6506     case PR_SME_GET_VL:
6507         return do_prctl_sme_get_vl(env);
6508     case PR_SME_SET_VL:
6509         return do_prctl_sme_set_vl(env, arg2);
6510     case PR_PAC_RESET_KEYS:
6511         if (arg3 || arg4 || arg5) {
6512             return -TARGET_EINVAL;
6513         }
6514         return do_prctl_reset_keys(env, arg2);
6515     case PR_SET_TAGGED_ADDR_CTRL:
6516         if (arg3 || arg4 || arg5) {
6517             return -TARGET_EINVAL;
6518         }
6519         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6520     case PR_GET_TAGGED_ADDR_CTRL:
6521         if (arg2 || arg3 || arg4 || arg5) {
6522             return -TARGET_EINVAL;
6523         }
6524         return do_prctl_get_tagged_addr_ctrl(env);
6525     case PR_GET_SHADOW_STACK_STATUS:
6526         if (arg3 || arg4 || arg5) {
6527             return -TARGET_EINVAL;
6528         }
6529         return do_prctl_get_shadow_stack_status(env, arg2);
6530     case PR_SET_SHADOW_STACK_STATUS:
6531         if (arg3 || arg4 || arg5) {
6532             return -TARGET_EINVAL;
6533         }
6534         return do_prctl_set_shadow_stack_status(env, arg2);
6535     case PR_LOCK_SHADOW_STACK_STATUS:
6536         if (arg3 || arg4 || arg5) {
6537             return -TARGET_EINVAL;
6538         }
6539         return do_prctl_lock_shadow_stack_status(env, arg2);
6540 
6541     case PR_GET_UNALIGN:
6542         return do_prctl_get_unalign(env, arg2);
6543     case PR_SET_UNALIGN:
6544         return do_prctl_set_unalign(env, arg2);
6545 
6546     case PR_SET_SYSCALL_USER_DISPATCH:
6547         return do_prctl_syscall_user_dispatch(env, arg2, arg3, arg4, arg5);
6548 
6549     case PR_CAP_AMBIENT:
6550     case PR_CAPBSET_READ:
6551     case PR_CAPBSET_DROP:
6552     case PR_GET_DUMPABLE:
6553     case PR_SET_DUMPABLE:
6554     case PR_GET_KEEPCAPS:
6555     case PR_SET_KEEPCAPS:
6556     case PR_GET_SECUREBITS:
6557     case PR_SET_SECUREBITS:
6558     case PR_GET_TIMING:
6559     case PR_SET_TIMING:
6560     case PR_GET_TIMERSLACK:
6561     case PR_SET_TIMERSLACK:
6562     case PR_MCE_KILL:
6563     case PR_MCE_KILL_GET:
6564     case PR_GET_NO_NEW_PRIVS:
6565     case PR_SET_NO_NEW_PRIVS:
6566     case PR_GET_IO_FLUSHER:
6567     case PR_SET_IO_FLUSHER:
6568     case PR_SET_CHILD_SUBREAPER:
6569     case PR_GET_SPECULATION_CTRL:
6570     case PR_SET_SPECULATION_CTRL:
6571         /* These prctl options take no pointer arguments and can be passed on as-is. */
6572         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6573 
6574     case PR_GET_CHILD_SUBREAPER:
6575         {
6576             int val;
6577             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6578                                   arg3, arg4, arg5));
6579             if (!is_error(ret) && put_user_s32(val, arg2)) {
6580                 return -TARGET_EFAULT;
6581             }
6582             return ret;
6583         }
6584 
6585     case PR_GET_TID_ADDRESS:
6586         {
6587             TaskState *ts = get_task_state(env_cpu(env));
6588             return put_user_ual(ts->child_tidptr, arg2);
6589         }
6590 
6591     case PR_GET_FPEXC:
6592     case PR_SET_FPEXC:
6593         /* Was used for SPE on PowerPC. */
6594         return -TARGET_EINVAL;
6595 
6596     case PR_GET_ENDIAN:
6597     case PR_SET_ENDIAN:
6598     case PR_GET_FPEMU:
6599     case PR_SET_FPEMU:
6600     case PR_SET_MM:
6601     case PR_GET_SECCOMP:
6602     case PR_SET_SECCOMP:
6603     case PR_GET_THP_DISABLE:
6604     case PR_SET_THP_DISABLE:
6605     case PR_GET_TSC:
6606     case PR_SET_TSC:
6607         /* Refuse these so the guest cannot disable functionality we need. */
6608         return -TARGET_EINVAL;
6609 
6610     default:
6611         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6612                       option);
6613         return -TARGET_EINVAL;
6614     }
6615 }
6616 
6617 #ifdef TARGET_AARCH64
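/*
 * map_shadow_stack for the AArch64 Guarded Control Stack: allocate a
 * new GCS region and, if requested, write a cap token near the top
 * (leaving an empty top frame when the marker flag is also set).
 */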
6618 static abi_long do_map_shadow_stack(CPUArchState *env, abi_ulong addr,
6619                                     abi_ulong size, abi_int flags)
6620 {
6621     ARMCPU *cpu = env_archcpu(env);
6622     abi_ulong alloc_size;
6623 
6624     if (!cpu_isar_feature(aa64_gcs, cpu)) {
6625         return -TARGET_EOPNOTSUPP;
6626     }
6627     if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER)) {
6628         return -TARGET_EINVAL;
6629     }
6630     if (addr & ~TARGET_PAGE_MASK) {
6631         return -TARGET_EINVAL;
6632     }
6633     if (size == 8 || !QEMU_IS_ALIGNED(size, 8)) {
6634         return -TARGET_EINVAL;
6635     }
6636 
6637     alloc_size = TARGET_PAGE_ALIGN(size);
6638     if (alloc_size < size) {
6639         return -TARGET_EOVERFLOW;
6640     }
6641 
6642     mmap_lock();
6643     addr = gcs_alloc(addr, alloc_size);
6644     if (addr != -1) {
6645         if (flags & SHADOW_STACK_SET_TOKEN) {
6646             abi_ptr cap_ptr = addr + size - 8;
6647             uint64_t cap_val;
6648 
6649             if (flags & SHADOW_STACK_SET_MARKER) {
6650                 /* Leave an extra empty frame at top-of-stack. */
6651                 cap_ptr -= 8;
6652             }
6653             cap_val = (cap_ptr & TARGET_PAGE_MASK) | 1;
6654             if (put_user_u64(cap_val, cap_ptr)) {
6655                 /* Allocation succeeded above. */
6656                 g_assert_not_reached();
6657             }
6658         }
6659     }
6660     mmap_unlock();
6661     return get_errno(addr);
6662 }
6663 #endif
6664 
6665 #define NEW_STACK_SIZE 0x40000
6666 
6667 
6668 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6669 typedef struct {
6670     CPUArchState *env;
6671     pthread_mutex_t mutex;
6672     pthread_cond_t cond;
6673     pthread_t thread;
6674     uint32_t tid;
6675     abi_ulong child_tidptr;
6676     abi_ulong parent_tidptr;
6677     sigset_t sigmask;
6678 } new_thread_info;
6679 
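/*
 * Start routine for threads created via do_fork() with CLONE_VM:
 * register the new thread with RCU and TCG, publish its TID, signal
 * the waiting parent and then enter the CPU loop.
 */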
6680 static void *clone_func(void *arg)
6681 {
6682     new_thread_info *info = arg;
6683     CPUArchState *env;
6684     CPUState *cpu;
6685     TaskState *ts;
6686 
6687     rcu_register_thread();
6688     tcg_register_thread();
6689     env = info->env;
6690     cpu = env_cpu(env);
6691     thread_cpu = cpu;
6692     ts = get_task_state(cpu);
6693     info->tid = sys_gettid();
6694     task_settid(ts);
6695     if (info->child_tidptr)
6696         put_user_u32(info->tid, info->child_tidptr);
6697     if (info->parent_tidptr)
6698         put_user_u32(info->tid, info->parent_tidptr);
6699     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6700     /* Enable signals.  */
6701     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6702     /* Signal to the parent that we're ready.  */
6703     pthread_mutex_lock(&info->mutex);
6704     pthread_cond_broadcast(&info->cond);
6705     pthread_mutex_unlock(&info->mutex);
6706     /* Wait until the parent has finished initializing the tls state.  */
6707     pthread_mutex_lock(&clone_lock);
6708     pthread_mutex_unlock(&clone_lock);
6709     cpu_loop(env);
6710     /* never exits */
6711     return NULL;
6712 }
6713 
6714 /* do_fork() must return host values and target errnos (unlike most
6715    do_*() functions). */
6716 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6717                    abi_ulong parent_tidptr, target_ulong newtls,
6718                    abi_ulong child_tidptr)
6719 {
6720     CPUState *cpu = env_cpu(env);
6721     int ret;
6722     TaskState *ts;
6723     CPUState *new_cpu;
6724     CPUArchState *new_env;
6725     sigset_t sigmask;
6726 
6727     flags &= ~CLONE_IGNORED_FLAGS;
6728 
6729     /* Emulate vfork() with fork() */
6730     if (flags & CLONE_VFORK)
6731         flags &= ~(CLONE_VFORK | CLONE_VM);
6732 
6733     if (flags & CLONE_VM) {
6734         TaskState *parent_ts = get_task_state(cpu);
6735         new_thread_info info;
6736         pthread_attr_t attr;
6737 
6738         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6739             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6740             return -TARGET_EINVAL;
6741         }
6742 
6743         ts = g_new0(TaskState, 1);
6744         init_task_state(ts);
6745 
6746 #ifdef TARGET_AARCH64
6747         /*
6748          * If GCS is enabled in the parent thread, it is also enabled
6749          * in the child thread, but with a newly allocated stack.
6750          */
6751         abi_long new_gcspr = 0;
6752         if (env->cp15.gcscr_el[0] & GCSCR_PCRSEL) {
6753             new_gcspr = gcs_new_stack(ts);
6754             if (new_gcspr == -1) {
6755                 g_free(ts);
6756                 return -TARGET_ENOMEM;
6757             }
6758         }
6759 #endif
6760 
6761         /* Grab a mutex so that thread setup appears atomic.  */
6762         pthread_mutex_lock(&clone_lock);
6763 
6764         /*
6765          * If this is our first additional thread, we need to ensure we
6766          * generate code for parallel execution and flush old translations.
6767          * Do this now so that the copy gets CF_PARALLEL too.
6768          */
6769         begin_parallel_context(cpu);
6770 
6771         /* we create a new CPU instance. */
6772         new_env = cpu_copy(env);
6773         /* Init regs that differ from the parent.  */
6774         cpu_clone_regs_child(new_env, newsp, flags);
6775         cpu_clone_regs_parent(env, flags);
6776         new_cpu = env_cpu(new_env);
6777         new_cpu->opaque = ts;
6778         ts->bprm = parent_ts->bprm;
6779         ts->info = parent_ts->info;
6780         ts->signal_mask = parent_ts->signal_mask;
6781 
6782 #ifdef TARGET_AARCH64
6783         ts->gcs_el0_locked = parent_ts->gcs_el0_locked;
6784         new_env->cp15.gcspr_el[0] = new_gcspr;
6785 #endif
6786 
6787         if (flags & CLONE_CHILD_CLEARTID) {
6788             ts->child_tidptr = child_tidptr;
6789         }
6790 
6791         if (flags & CLONE_SETTLS) {
6792             cpu_set_tls (new_env, newtls);
6793         }
6794 
6795         memset(&info, 0, sizeof(info));
6796         pthread_mutex_init(&info.mutex, NULL);
6797         pthread_mutex_lock(&info.mutex);
6798         pthread_cond_init(&info.cond, NULL);
6799         info.env = new_env;
6800         if (flags & CLONE_CHILD_SETTID) {
6801             info.child_tidptr = child_tidptr;
6802         }
6803         if (flags & CLONE_PARENT_SETTID) {
6804             info.parent_tidptr = parent_tidptr;
6805         }
6806 
6807         ret = pthread_attr_init(&attr);
6808         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6809         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6810         /* It is not safe to deliver signals until the child has finished
6811            initializing, so temporarily block all signals.  */
6812         sigfillset(&sigmask);
6813         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6814         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6815 
6816         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6817         /* TODO: Free new CPU state if thread creation failed.  */
6818 
6819         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6820         pthread_attr_destroy(&attr);
6821         if (ret == 0) {
6822             /* Wait for the child to initialize.  */
6823             pthread_cond_wait(&info.cond, &info.mutex);
6824             ret = info.tid;
6825         } else {
6826             ret = -1;
6827         }
6828         pthread_mutex_unlock(&info.mutex);
6829         pthread_cond_destroy(&info.cond);
6830         pthread_mutex_destroy(&info.mutex);
6831         pthread_mutex_unlock(&clone_lock);
6832     } else {
6833         /* without CLONE_VM, we consider it a fork */
6834         if (flags & CLONE_INVALID_FORK_FLAGS) {
6835             return -TARGET_EINVAL;
6836         }
6837 
6838         /* We can't support custom termination signals */
6839         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6840             return -TARGET_EINVAL;
6841         }
6842 
6843 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6844         if (flags & CLONE_PIDFD) {
6845             return -TARGET_EINVAL;
6846         }
6847 #endif
6848 
6849         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6850         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6851             return -TARGET_EINVAL;
6852         }
6853 
6854         if (block_signals()) {
6855             return -QEMU_ERESTARTSYS;
6856         }
6857 
6858         fork_start();
6859         ret = fork();
6860         if (ret == 0) {
6861             /* Child Process.  */
6862             cpu_clone_regs_child(env, newsp, flags);
6863             fork_end(ret);
6864             /* There is a race condition here.  The parent process could
6865                theoretically read the TID in the child process before the child
6866                tid is set.  This would require using either ptrace
6867                (not implemented) or having *_tidptr point at a shared memory
6868                mapping.  We can't repeat the spinlock hack used above because
6869                the child process gets its own copy of the lock.  */
6870             if (flags & CLONE_CHILD_SETTID)
6871                 put_user_u32(sys_gettid(), child_tidptr);
6872             if (flags & CLONE_PARENT_SETTID)
6873                 put_user_u32(sys_gettid(), parent_tidptr);
6874             ts = get_task_state(cpu);
6875             if (flags & CLONE_SETTLS)
6876                 cpu_set_tls (env, newtls);
6877             if (flags & CLONE_CHILD_CLEARTID)
6878                 ts->child_tidptr = child_tidptr;
6879         } else {
6880             cpu_clone_regs_parent(env, flags);
6881             if (flags & CLONE_PIDFD) {
6882                 int pid_fd = 0;
6883 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6884                 int pid_child = ret;
6885                 pid_fd = pidfd_open(pid_child, 0);
6886                 if (pid_fd >= 0) {
6887                     qemu_set_cloexec(pid_fd);
6888                 } else {
6889                     pid_fd = 0;
6890                 }
6891 #endif
6892                 put_user_u32(pid_fd, parent_tidptr);
6893             }
6894             fork_end(ret);
6895         }
6896         g_assert(!cpu_in_exclusive_context(cpu));
6897     }
6898     return ret;
6899 }
6900 
6901 /* warning: doesn't handle Linux-specific flags... */
6902 static int target_to_host_fcntl_cmd(int cmd)
6903 {
6904     int ret;
6905 
6906     switch(cmd) {
6907     case TARGET_F_DUPFD:
6908     case TARGET_F_GETFD:
6909     case TARGET_F_SETFD:
6910     case TARGET_F_GETFL:
6911     case TARGET_F_SETFL:
6912     case TARGET_F_OFD_GETLK:
6913     case TARGET_F_OFD_SETLK:
6914     case TARGET_F_OFD_SETLKW:
6915         ret = cmd;
6916         break;
6917     case TARGET_F_GETLK:
6918         ret = F_GETLK;
6919         break;
6920     case TARGET_F_SETLK:
6921         ret = F_SETLK;
6922         break;
6923     case TARGET_F_SETLKW:
6924         ret = F_SETLKW;
6925         break;
6926     case TARGET_F_GETOWN:
6927         ret = F_GETOWN;
6928         break;
6929     case TARGET_F_SETOWN:
6930         ret = F_SETOWN;
6931         break;
6932     case TARGET_F_GETSIG:
6933         ret = F_GETSIG;
6934         break;
6935     case TARGET_F_SETSIG:
6936         ret = F_SETSIG;
6937         break;
6938 #if TARGET_ABI_BITS == 32
6939     case TARGET_F_GETLK64:
6940         ret = F_GETLK;
6941         break;
6942     case TARGET_F_SETLK64:
6943         ret = F_SETLK;
6944         break;
6945     case TARGET_F_SETLKW64:
6946         ret = F_SETLKW;
6947         break;
6948 #endif
6949     case TARGET_F_SETLEASE:
6950         ret = F_SETLEASE;
6951         break;
6952     case TARGET_F_GETLEASE:
6953         ret = F_GETLEASE;
6954         break;
6955 #ifdef F_DUPFD_CLOEXEC
6956     case TARGET_F_DUPFD_CLOEXEC:
6957         ret = F_DUPFD_CLOEXEC;
6958         break;
6959 #endif
6960     case TARGET_F_NOTIFY:
6961         ret = F_NOTIFY;
6962         break;
6963 #ifdef F_GETOWN_EX
6964     case TARGET_F_GETOWN_EX:
6965         ret = F_GETOWN_EX;
6966         break;
6967 #endif
6968 #ifdef F_SETOWN_EX
6969     case TARGET_F_SETOWN_EX:
6970         ret = F_SETOWN_EX;
6971         break;
6972 #endif
6973 #ifdef F_SETPIPE_SZ
6974     case TARGET_F_SETPIPE_SZ:
6975         ret = F_SETPIPE_SZ;
6976         break;
6977     case TARGET_F_GETPIPE_SZ:
6978         ret = F_GETPIPE_SZ;
6979         break;
6980 #endif
6981 #ifdef F_ADD_SEALS
6982     case TARGET_F_ADD_SEALS:
6983         ret = F_ADD_SEALS;
6984         break;
6985     case TARGET_F_GET_SEALS:
6986         ret = F_GET_SEALS;
6987         break;
6988 #endif
6989     default:
6990         ret = -TARGET_EINVAL;
6991         break;
6992     }
6993 
6994 #if defined(__powerpc64__)
6995     /* On PPC64, the glibc headers define F_*LK* to 12, 13 and 14, which are
6996      * not supported by the kernel. The glibc fcntl call actually adjusts
6997      * them to 5, 6 and 7 before making the syscall(). Since we make the
6998      * syscall directly, adjust to what is supported by the kernel.
6999      */
7000     if (ret >= F_GETLK && ret <= F_SETLKW) {
7001         ret -= F_GETLK - 5;
7002     }
7003 #endif
7004 
7005     return ret;
7006 }
7007 
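/*
 * Translate flock lock types (F_RDLCK/F_WRLCK/F_UNLCK) between target
 * and host values; the table is expanded once for each direction below.
 */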
7008 #define FLOCK_TRANSTBL \
7009     switch (type) { \
7010     TRANSTBL_CONVERT(F_RDLCK); \
7011     TRANSTBL_CONVERT(F_WRLCK); \
7012     TRANSTBL_CONVERT(F_UNLCK); \
7013     }
7014 
7015 static int target_to_host_flock(int type)
7016 {
7017 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
7018     FLOCK_TRANSTBL
7019 #undef  TRANSTBL_CONVERT
7020     return -TARGET_EINVAL;
7021 }
7022 
7023 static int host_to_target_flock(int type)
7024 {
7025 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
7026     FLOCK_TRANSTBL
7027 #undef  TRANSTBL_CONVERT
7028     /* if we don't know how to convert the value coming
7029      * from the host we copy to the target field as-is
7030      */
7031     return type;
7032 }
7033 
7034 static inline abi_long copy_from_user_flock(struct flock *fl,
7035                                             abi_ulong target_flock_addr)
7036 {
7037     struct target_flock *target_fl;
7038     int l_type;
7039 
7040     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7041         return -TARGET_EFAULT;
7042     }
7043 
7044     __get_user(l_type, &target_fl->l_type);
7045     l_type = target_to_host_flock(l_type);
7046     if (l_type < 0) {
7047         return l_type;
7048     }
7049     fl->l_type = l_type;
7050     __get_user(fl->l_whence, &target_fl->l_whence);
7051     __get_user(fl->l_start, &target_fl->l_start);
7052     __get_user(fl->l_len, &target_fl->l_len);
7053     __get_user(fl->l_pid, &target_fl->l_pid);
7054     unlock_user_struct(target_fl, target_flock_addr, 0);
7055     return 0;
7056 }
7057 
7058 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
7059                                           const struct flock *fl)
7060 {
7061     struct target_flock *target_fl;
7062     short l_type;
7063 
7064     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7065         return -TARGET_EFAULT;
7066     }
7067 
7068     l_type = host_to_target_flock(fl->l_type);
7069     __put_user(l_type, &target_fl->l_type);
7070     __put_user(fl->l_whence, &target_fl->l_whence);
7071     __put_user(fl->l_start, &target_fl->l_start);
7072     __put_user(fl->l_len, &target_fl->l_len);
7073     __put_user(fl->l_pid, &target_fl->l_pid);
7074     unlock_user_struct(target_fl, target_flock_addr, 1);
7075     return 0;
7076 }
7077 
7078 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
7079 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
7080 
7081 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
7082 struct target_oabi_flock64 {
7083     abi_short l_type;
7084     abi_short l_whence;
7085     abi_llong l_start;
7086     abi_llong l_len;
7087     abi_int   l_pid;
7088 } QEMU_PACKED;
7089 
7090 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
7091                                                    abi_ulong target_flock_addr)
7092 {
7093     struct target_oabi_flock64 *target_fl;
7094     int l_type;
7095 
7096     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7097         return -TARGET_EFAULT;
7098     }
7099 
7100     __get_user(l_type, &target_fl->l_type);
7101     l_type = target_to_host_flock(l_type);
7102     if (l_type < 0) {
7103         return l_type;
7104     }
7105     fl->l_type = l_type;
7106     __get_user(fl->l_whence, &target_fl->l_whence);
7107     __get_user(fl->l_start, &target_fl->l_start);
7108     __get_user(fl->l_len, &target_fl->l_len);
7109     __get_user(fl->l_pid, &target_fl->l_pid);
7110     unlock_user_struct(target_fl, target_flock_addr, 0);
7111     return 0;
7112 }
7113 
7114 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7115                                                  const struct flock *fl)
7116 {
7117     struct target_oabi_flock64 *target_fl;
7118     short l_type;
7119 
7120     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7121         return -TARGET_EFAULT;
7122     }
7123 
7124     l_type = host_to_target_flock(fl->l_type);
7125     __put_user(l_type, &target_fl->l_type);
7126     __put_user(fl->l_whence, &target_fl->l_whence);
7127     __put_user(fl->l_start, &target_fl->l_start);
7128     __put_user(fl->l_len, &target_fl->l_len);
7129     __put_user(fl->l_pid, &target_fl->l_pid);
7130     unlock_user_struct(target_fl, target_flock_addr, 1);
7131     return 0;
7132 }
7133 #endif
7134 
7135 static inline abi_long copy_from_user_flock64(struct flock *fl,
7136                                               abi_ulong target_flock_addr)
7137 {
7138     struct target_flock64 *target_fl;
7139     int l_type;
7140 
7141     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7142         return -TARGET_EFAULT;
7143     }
7144 
7145     __get_user(l_type, &target_fl->l_type);
7146     l_type = target_to_host_flock(l_type);
7147     if (l_type < 0) {
7148         return l_type;
7149     }
7150     fl->l_type = l_type;
7151     __get_user(fl->l_whence, &target_fl->l_whence);
7152     __get_user(fl->l_start, &target_fl->l_start);
7153     __get_user(fl->l_len, &target_fl->l_len);
7154     __get_user(fl->l_pid, &target_fl->l_pid);
7155     unlock_user_struct(target_fl, target_flock_addr, 0);
7156     return 0;
7157 }
7158 
7159 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7160                                             const struct flock *fl)
7161 {
7162     struct target_flock64 *target_fl;
7163     short l_type;
7164 
7165     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7166         return -TARGET_EFAULT;
7167     }
7168 
7169     l_type = host_to_target_flock(fl->l_type);
7170     __put_user(l_type, &target_fl->l_type);
7171     __put_user(fl->l_whence, &target_fl->l_whence);
7172     __put_user(fl->l_start, &target_fl->l_start);
7173     __put_user(fl->l_len, &target_fl->l_len);
7174     __put_user(fl->l_pid, &target_fl->l_pid);
7175     unlock_user_struct(target_fl, target_flock_addr, 1);
7176     return 0;
7177 }
7178 
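/*
 * fcntl emulation: translate the command and convert any struct flock,
 * flock64 or f_owner_ex argument between target and host layouts around
 * the host fcntl call.
 */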
7179 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7180 {
7181     struct flock fl;
7182 #ifdef F_GETOWN_EX
7183     struct f_owner_ex fox;
7184     struct target_f_owner_ex *target_fox;
7185 #endif
7186     abi_long ret;
7187     int host_cmd = target_to_host_fcntl_cmd(cmd);
7188 
7189     if (host_cmd == -TARGET_EINVAL)
7190         return host_cmd;
7191 
7192     switch(cmd) {
7193     case TARGET_F_GETLK:
7194         ret = copy_from_user_flock(&fl, arg);
7195         if (ret) {
7196             return ret;
7197         }
7198         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7199         if (ret == 0) {
7200             ret = copy_to_user_flock(arg, &fl);
7201         }
7202         break;
7203 
7204     case TARGET_F_SETLK:
7205     case TARGET_F_SETLKW:
7206         ret = copy_from_user_flock(&fl, arg);
7207         if (ret) {
7208             return ret;
7209         }
7210         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7211         break;
7212 
7213     case TARGET_F_GETLK64:
7214     case TARGET_F_OFD_GETLK:
7215         ret = copy_from_user_flock64(&fl, arg);
7216         if (ret) {
7217             return ret;
7218         }
7219         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7220         if (ret == 0) {
7221             ret = copy_to_user_flock64(arg, &fl);
7222         }
7223         break;
7224     case TARGET_F_SETLK64:
7225     case TARGET_F_SETLKW64:
7226     case TARGET_F_OFD_SETLK:
7227     case TARGET_F_OFD_SETLKW:
7228         ret = copy_from_user_flock64(&fl, arg);
7229         if (ret) {
7230             return ret;
7231         }
7232         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7233         break;
7234 
7235     case TARGET_F_GETFL:
7236         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7237         if (ret >= 0) {
7238             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7239             /* tell 32-bit guests that the fd uses O_LARGEFILE on 64-bit hosts */
7240             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7241                 ret |= TARGET_O_LARGEFILE;
7242             }
7243         }
7244         break;
7245 
7246     case TARGET_F_SETFL:
7247         ret = get_errno(safe_fcntl(fd, host_cmd,
7248                                    target_to_host_bitmask(arg,
7249                                                           fcntl_flags_tbl)));
7250         break;
7251 
7252 #ifdef F_GETOWN_EX
7253     case TARGET_F_GETOWN_EX:
7254         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7255         if (ret >= 0) {
7256             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7257                 return -TARGET_EFAULT;
7258             target_fox->type = tswap32(fox.type);
7259             target_fox->pid = tswap32(fox.pid);
7260             unlock_user_struct(target_fox, arg, 1);
7261         }
7262         break;
7263 #endif
7264 
7265 #ifdef F_SETOWN_EX
7266     case TARGET_F_SETOWN_EX:
7267         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7268             return -TARGET_EFAULT;
7269         fox.type = tswap32(target_fox->type);
7270         fox.pid = tswap32(target_fox->pid);
7271         unlock_user_struct(target_fox, arg, 0);
7272         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7273         break;
7274 #endif
7275 
7276     case TARGET_F_SETSIG:
7277         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7278         break;
7279 
7280     case TARGET_F_GETSIG:
7281         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7282         break;
7283 
7284     case TARGET_F_SETOWN:
7285     case TARGET_F_GETOWN:
7286     case TARGET_F_SETLEASE:
7287     case TARGET_F_GETLEASE:
7288     case TARGET_F_SETPIPE_SZ:
7289     case TARGET_F_GETPIPE_SZ:
7290     case TARGET_F_ADD_SEALS:
7291     case TARGET_F_GET_SEALS:
7292         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7293         break;
7294 
7295     default:
7296         ret = get_errno(safe_fcntl(fd, cmd, arg));
7297         break;
7298     }
7299     return ret;
7300 }
7301 
7302 #ifdef USE_UID16
7303 
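/*
 * Helpers for the legacy 16-bit UID/GID ABI: IDs above 65535 are
 * reported as the overflow ID 65534, and an incoming 16-bit -1 keeps
 * its "unchanged" meaning when widened.
 */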
7304 static inline int high2lowuid(int uid)
7305 {
7306     if (uid > 65535)
7307         return 65534;
7308     else
7309         return uid;
7310 }
7311 
7312 static inline int high2lowgid(int gid)
7313 {
7314     if (gid > 65535)
7315         return 65534;
7316     else
7317         return gid;
7318 }
7319 
7320 static inline int low2highuid(int uid)
7321 {
7322     if ((int16_t)uid == -1)
7323         return -1;
7324     else
7325         return uid;
7326 }
7327 
7328 static inline int low2highgid(int gid)
7329 {
7330     if ((int16_t)gid == -1)
7331         return -1;
7332     else
7333         return gid;
7334 }
7335 static inline int tswapid(int id)
7336 {
7337     return tswap16(id);
7338 }
7339 
7340 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7341 
7342 #else /* !USE_UID16 */
7343 static inline int high2lowuid(int uid)
7344 {
7345     return uid;
7346 }
7347 static inline int high2lowgid(int gid)
7348 {
7349     return gid;
7350 }
7351 static inline int low2highuid(int uid)
7352 {
7353     return uid;
7354 }
7355 static inline int low2highgid(int gid)
7356 {
7357     return gid;
7358 }
7359 static inline int tswapid(int id)
7360 {
7361     return tswap32(id);
7362 }
7363 
7364 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7365 
7366 #endif /* USE_UID16 */
7367 
7368 /* We must do direct syscalls for setting UID/GID, because we want to
7369  * implement the Linux system call semantics of "change only for this thread",
7370  * not the libc/POSIX semantics of "change for all threads in process".
7371  * (See http://ewontfix.com/17/ for more details.)
7372  * We use the 32-bit version of the syscalls if present; if it is not
7373  * then either the host architecture supports 32-bit UIDs natively with
7374  * the standard syscall, or the 16-bit UID is the best we can do.
7375  */
7376 #ifdef __NR_setuid32
7377 #define __NR_sys_setuid __NR_setuid32
7378 #else
7379 #define __NR_sys_setuid __NR_setuid
7380 #endif
7381 #ifdef __NR_setgid32
7382 #define __NR_sys_setgid __NR_setgid32
7383 #else
7384 #define __NR_sys_setgid __NR_setgid
7385 #endif
7386 #ifdef __NR_setresuid32
7387 #define __NR_sys_setresuid __NR_setresuid32
7388 #else
7389 #define __NR_sys_setresuid __NR_setresuid
7390 #endif
7391 #ifdef __NR_setresgid32
7392 #define __NR_sys_setresgid __NR_setresgid32
7393 #else
7394 #define __NR_sys_setresgid __NR_setresgid
7395 #endif
7396 #ifdef __NR_setgroups32
7397 #define __NR_sys_setgroups __NR_setgroups32
7398 #else
7399 #define __NR_sys_setgroups __NR_setgroups
7400 #endif
7401 #ifdef __NR_setreuid32
7402 #define __NR_sys_setreuid __NR_setreuid32
7403 #else
7404 #define __NR_sys_setreuid __NR_setreuid
7405 #endif
7406 #ifdef __NR_setregid32
7407 #define __NR_sys_setregid __NR_setregid32
7408 #else
7409 #define __NR_sys_setregid __NR_setregid
7410 #endif
7411 
7412 _syscall1(int, sys_setuid, uid_t, uid)
7413 _syscall1(int, sys_setgid, gid_t, gid)
7414 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7415 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7416 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7417 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid)
7418 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid)
7419 
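/*
 * One-time initialization: register the struct layouts used by the
 * thunk code and patch any target ioctl numbers whose size field must
 * be computed from the argument type at runtime.
 */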
7420 void syscall_init(void)
7421 {
7422     IOCTLEntry *ie;
7423     const argtype *arg_type;
7424     int size;
7425 
7426     thunk_init(STRUCT_MAX);
7427 
7428 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7429 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7430 #include "syscall_types.h"
7431 #undef STRUCT
7432 #undef STRUCT_SPECIAL
7433 
7434     /* We patch the ioctl size if necessary. We rely on the fact that
7435        no ioctl has all the bits set to '1' in the size field. */
7436     ie = ioctl_entries;
7437     while (ie->target_cmd != 0) {
7438         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7439             TARGET_IOC_SIZEMASK) {
7440             arg_type = ie->arg_type;
7441             if (arg_type[0] != TYPE_PTR) {
7442                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7443                         ie->target_cmd);
7444                 exit(1);
7445             }
7446             arg_type++;
7447             size = thunk_type_size(arg_type, 0);
7448             ie->target_cmd = (ie->target_cmd &
7449                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7450                 (size << TARGET_IOC_SIZESHIFT);
7451         }
7452 
7453         /* automatic consistency check if same arch */
7454 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7455     (defined(__x86_64__) && defined(TARGET_X86_64))
7456         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7457             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7458                     ie->name, ie->target_cmd, ie->host_cmd);
7459         }
7460 #endif
7461         ie++;
7462     }
7463 }
7464 
7465 #ifdef TARGET_NR_truncate64
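/*
 * truncate64 (and ftruncate64 below): on ABIs that pass 64-bit values
 * in aligned register pairs, the offset halves arrive in arg3/arg4
 * rather than arg2/arg3, so shift them down before reassembling.
 */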
7466 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7467                                          abi_long arg2,
7468                                          abi_long arg3,
7469                                          abi_long arg4)
7470 {
7471     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7472         arg2 = arg3;
7473         arg3 = arg4;
7474     }
7475     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7476 }
7477 #endif
7478 
7479 #ifdef TARGET_NR_ftruncate64
7480 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7481                                           abi_long arg2,
7482                                           abi_long arg3,
7483                                           abi_long arg4)
7484 {
7485     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7486         arg2 = arg3;
7487         arg3 = arg4;
7488     }
7489     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7490 }
7491 #endif
7492 
7493 #if defined(TARGET_NR_timer_settime) || \
7494     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7495 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7496                                                  abi_ulong target_addr)
7497 {
7498     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7499                                 offsetof(struct target_itimerspec,
7500                                          it_interval)) ||
7501         target_to_host_timespec(&host_its->it_value, target_addr +
7502                                 offsetof(struct target_itimerspec,
7503                                          it_value))) {
7504         return -TARGET_EFAULT;
7505     }
7506 
7507     return 0;
7508 }
7509 #endif
7510 
7511 #if defined(TARGET_NR_timer_settime64) || \
7512     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7513 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7514                                                    abi_ulong target_addr)
7515 {
7516     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7517                                   offsetof(struct target__kernel_itimerspec,
7518                                            it_interval)) ||
7519         target_to_host_timespec64(&host_its->it_value, target_addr +
7520                                   offsetof(struct target__kernel_itimerspec,
7521                                            it_value))) {
7522         return -TARGET_EFAULT;
7523     }
7524 
7525     return 0;
7526 }
7527 #endif
7528 
7529 #if ((defined(TARGET_NR_timerfd_gettime) || \
7530       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7531       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7532 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7533                                                  struct itimerspec *host_its)
7534 {
7535     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7536                                                        it_interval),
7537                                 &host_its->it_interval) ||
7538         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7539                                                        it_value),
7540                                 &host_its->it_value)) {
7541         return -TARGET_EFAULT;
7542     }
7543     return 0;
7544 }
7545 #endif
7546 
7547 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7548       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7549       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7550 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7551                                                    struct itimerspec *host_its)
7552 {
7553     if (host_to_target_timespec64(target_addr +
7554                                   offsetof(struct target__kernel_itimerspec,
7555                                            it_interval),
7556                                   &host_its->it_interval) ||
7557         host_to_target_timespec64(target_addr +
7558                                   offsetof(struct target__kernel_itimerspec,
7559                                            it_value),
7560                                   &host_its->it_value)) {
7561         return -TARGET_EFAULT;
7562     }
7563     return 0;
7564 }
7565 #endif
7566 
7567 #if defined(TARGET_NR_adjtimex) || \
7568     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7569 static inline abi_long target_to_host_timex(struct timex *host_tx,
7570                                             abi_long target_addr)
7571 {
7572     struct target_timex *target_tx;
7573 
7574     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7575         return -TARGET_EFAULT;
7576     }
7577 
7578     __get_user(host_tx->modes, &target_tx->modes);
7579     __get_user(host_tx->offset, &target_tx->offset);
7580     __get_user(host_tx->freq, &target_tx->freq);
7581     __get_user(host_tx->maxerror, &target_tx->maxerror);
7582     __get_user(host_tx->esterror, &target_tx->esterror);
7583     __get_user(host_tx->status, &target_tx->status);
7584     __get_user(host_tx->constant, &target_tx->constant);
7585     __get_user(host_tx->precision, &target_tx->precision);
7586     __get_user(host_tx->tolerance, &target_tx->tolerance);
7587     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7588     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7589     __get_user(host_tx->tick, &target_tx->tick);
7590     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7591     __get_user(host_tx->jitter, &target_tx->jitter);
7592     __get_user(host_tx->shift, &target_tx->shift);
7593     __get_user(host_tx->stabil, &target_tx->stabil);
7594     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7595     __get_user(host_tx->calcnt, &target_tx->calcnt);
7596     __get_user(host_tx->errcnt, &target_tx->errcnt);
7597     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7598     __get_user(host_tx->tai, &target_tx->tai);
7599 
7600     unlock_user_struct(target_tx, target_addr, 0);
7601     return 0;
7602 }
7603 
7604 static inline abi_long host_to_target_timex(abi_long target_addr,
7605                                             struct timex *host_tx)
7606 {
7607     struct target_timex *target_tx;
7608 
7609     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7610         return -TARGET_EFAULT;
7611     }
7612 
7613     __put_user(host_tx->modes, &target_tx->modes);
7614     __put_user(host_tx->offset, &target_tx->offset);
7615     __put_user(host_tx->freq, &target_tx->freq);
7616     __put_user(host_tx->maxerror, &target_tx->maxerror);
7617     __put_user(host_tx->esterror, &target_tx->esterror);
7618     __put_user(host_tx->status, &target_tx->status);
7619     __put_user(host_tx->constant, &target_tx->constant);
7620     __put_user(host_tx->precision, &target_tx->precision);
7621     __put_user(host_tx->tolerance, &target_tx->tolerance);
7622     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7623     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7624     __put_user(host_tx->tick, &target_tx->tick);
7625     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7626     __put_user(host_tx->jitter, &target_tx->jitter);
7627     __put_user(host_tx->shift, &target_tx->shift);
7628     __put_user(host_tx->stabil, &target_tx->stabil);
7629     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7630     __put_user(host_tx->calcnt, &target_tx->calcnt);
7631     __put_user(host_tx->errcnt, &target_tx->errcnt);
7632     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7633     __put_user(host_tx->tai, &target_tx->tai);
7634 
7635     unlock_user_struct(target_tx, target_addr, 1);
7636     return 0;
7637 }
7638 #endif
7639 
7640 
7641 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7642 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7643                                               abi_long target_addr)
7644 {
7645     struct target__kernel_timex *target_tx;
7646 
7647     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7648                                  offsetof(struct target__kernel_timex,
7649                                           time))) {
7650         return -TARGET_EFAULT;
7651     }
7652 
7653     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7654         return -TARGET_EFAULT;
7655     }
7656 
7657     __get_user(host_tx->modes, &target_tx->modes);
7658     __get_user(host_tx->offset, &target_tx->offset);
7659     __get_user(host_tx->freq, &target_tx->freq);
7660     __get_user(host_tx->maxerror, &target_tx->maxerror);
7661     __get_user(host_tx->esterror, &target_tx->esterror);
7662     __get_user(host_tx->status, &target_tx->status);
7663     __get_user(host_tx->constant, &target_tx->constant);
7664     __get_user(host_tx->precision, &target_tx->precision);
7665     __get_user(host_tx->tolerance, &target_tx->tolerance);
7666     __get_user(host_tx->tick, &target_tx->tick);
7667     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7668     __get_user(host_tx->jitter, &target_tx->jitter);
7669     __get_user(host_tx->shift, &target_tx->shift);
7670     __get_user(host_tx->stabil, &target_tx->stabil);
7671     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7672     __get_user(host_tx->calcnt, &target_tx->calcnt);
7673     __get_user(host_tx->errcnt, &target_tx->errcnt);
7674     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7675     __get_user(host_tx->tai, &target_tx->tai);
7676 
7677     unlock_user_struct(target_tx, target_addr, 0);
7678     return 0;
7679 }
7680 
7681 static inline abi_long host_to_target_timex64(abi_long target_addr,
7682                                               struct timex *host_tx)
7683 {
7684     struct target__kernel_timex *target_tx;
7685 
7686    if (copy_to_user_timeval64(target_addr +
7687                               offsetof(struct target__kernel_timex, time),
7688                               &host_tx->time)) {
7689         return -TARGET_EFAULT;
7690     }
7691 
7692     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7693         return -TARGET_EFAULT;
7694     }
7695 
7696     __put_user(host_tx->modes, &target_tx->modes);
7697     __put_user(host_tx->offset, &target_tx->offset);
7698     __put_user(host_tx->freq, &target_tx->freq);
7699     __put_user(host_tx->maxerror, &target_tx->maxerror);
7700     __put_user(host_tx->esterror, &target_tx->esterror);
7701     __put_user(host_tx->status, &target_tx->status);
7702     __put_user(host_tx->constant, &target_tx->constant);
7703     __put_user(host_tx->precision, &target_tx->precision);
7704     __put_user(host_tx->tolerance, &target_tx->tolerance);
7705     __put_user(host_tx->tick, &target_tx->tick);
7706     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7707     __put_user(host_tx->jitter, &target_tx->jitter);
7708     __put_user(host_tx->shift, &target_tx->shift);
7709     __put_user(host_tx->stabil, &target_tx->stabil);
7710     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7711     __put_user(host_tx->calcnt, &target_tx->calcnt);
7712     __put_user(host_tx->errcnt, &target_tx->errcnt);
7713     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7714     __put_user(host_tx->tai, &target_tx->tai);
7715 
7716     unlock_user_struct(target_tx, target_addr, 1);
7717     return 0;
7718 }
7719 #endif
7720 
7721 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7722 #define sigev_notify_thread_id _sigev_un._tid
7723 #endif
7724 
7725 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7726                                                abi_ulong target_addr)
7727 {
7728     struct target_sigevent *target_sevp;
7729 
7730     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7731         return -TARGET_EFAULT;
7732     }
7733 
7734     /* This union is awkward on 64-bit systems because it contains both a
7735      * 32-bit integer and a pointer; we follow the conversion approach
7736      * used for handling sigval types in signal.c, so the guest should get
7737      * the correct value back even if we did a 64-bit byteswap and it is
7738      * using the 32-bit integer.
7739      */
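    /*
     * e.g. a guest that stored a 32-bit sival_int still observes that
     * same value when the event is delivered, because signal.c applies
     * the mirror-image conversion on the way back to the guest.
     */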
7740     host_sevp->sigev_value.sival_ptr =
7741         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7742     host_sevp->sigev_signo =
7743         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7744     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7745     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7746 
7747     unlock_user_struct(target_sevp, target_addr, 1);
7748     return 0;
7749 }
7750 
7751 #if defined(TARGET_NR_mlockall)
7752 static inline int target_to_host_mlockall_arg(int arg)
7753 {
7754     int result = 0;
7755 
7756     if (arg & TARGET_MCL_CURRENT) {
7757         result |= MCL_CURRENT;
7758     }
7759     if (arg & TARGET_MCL_FUTURE) {
7760         result |= MCL_FUTURE;
7761     }
7762 #ifdef MCL_ONFAULT
7763     if (arg & TARGET_MCL_ONFAULT) {
7764         result |= MCL_ONFAULT;
7765     }
7766 #endif
7767 
7768     return result;
7769 }
7770 #endif
7771 
7772 static inline int target_to_host_msync_arg(abi_long arg)
7773 {
7774     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7775            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7776            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7777            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7778 }
7779 
7780 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7781      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7782      defined(TARGET_NR_newfstatat))
7783 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7784                                              abi_ulong target_addr,
7785                                              struct stat *host_st)
7786 {
7787 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7788     if (cpu_env->eabi) {
7789         struct target_eabi_stat64 *target_st;
7790 
7791         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7792             return -TARGET_EFAULT;
7793         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7794         __put_user(host_st->st_dev, &target_st->st_dev);
7795         __put_user(host_st->st_ino, &target_st->st_ino);
7796 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7797         __put_user(host_st->st_ino, &target_st->__st_ino);
7798 #endif
7799         __put_user(host_st->st_mode, &target_st->st_mode);
7800         __put_user(host_st->st_nlink, &target_st->st_nlink);
7801         __put_user(host_st->st_uid, &target_st->st_uid);
7802         __put_user(host_st->st_gid, &target_st->st_gid);
7803         __put_user(host_st->st_rdev, &target_st->st_rdev);
7804         __put_user(host_st->st_size, &target_st->st_size);
7805         __put_user(host_st->st_blksize, &target_st->st_blksize);
7806         __put_user(host_st->st_blocks, &target_st->st_blocks);
7807         __put_user(host_st->st_atime, &target_st->target_st_atime);
7808         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7809         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7810 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7811         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7812         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7813         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7814 #endif
7815         unlock_user_struct(target_st, target_addr, 1);
7816     } else
7817 #endif
7818     {
7819 #if defined(TARGET_HAS_STRUCT_STAT64)
7820         struct target_stat64 *target_st;
7821 #else
7822         struct target_stat *target_st;
7823 #endif
7824 
7825         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7826             return -TARGET_EFAULT;
7827         memset(target_st, 0, sizeof(*target_st));
7828         __put_user(host_st->st_dev, &target_st->st_dev);
7829         __put_user(host_st->st_ino, &target_st->st_ino);
7830 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7831         __put_user(host_st->st_ino, &target_st->__st_ino);
7832 #endif
7833         __put_user(host_st->st_mode, &target_st->st_mode);
7834         __put_user(host_st->st_nlink, &target_st->st_nlink);
7835         __put_user(host_st->st_uid, &target_st->st_uid);
7836         __put_user(host_st->st_gid, &target_st->st_gid);
7837         __put_user(host_st->st_rdev, &target_st->st_rdev);
7838         /* XXX: better use of kernel struct */
7839         __put_user(host_st->st_size, &target_st->st_size);
7840         __put_user(host_st->st_blksize, &target_st->st_blksize);
7841         __put_user(host_st->st_blocks, &target_st->st_blocks);
7842         __put_user(host_st->st_atime, &target_st->target_st_atime);
7843         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7844         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7845 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7846         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7847         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7848         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7849 #endif
7850         unlock_user_struct(target_st, target_addr, 1);
7851     }
7852 
7853     return 0;
7854 }
7855 #endif
7856 
7857 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7858 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7859                                             abi_ulong target_addr)
7860 {
7861     struct target_statx *target_stx;
7862 
7863     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7864         return -TARGET_EFAULT;
7865     }
7866     memset(target_stx, 0, sizeof(*target_stx));
7867 
7868     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7869     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7870     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7871     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7872     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7873     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7874     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7875     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7876     __put_user(host_stx->stx_size, &target_stx->stx_size);
7877     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7878     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7879     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7880     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7881     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7882     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7883     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7884     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7885     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7886     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7887     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7888     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7889     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7890     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7891 
7892     unlock_user_struct(target_stx, target_addr, 1);
7893 
7894     return 0;
7895 }
7896 #endif
7897 
7898 static int do_sys_futex(int *uaddr, int op, int val,
7899                          const struct timespec *timeout, int *uaddr2,
7900                          int val3)
7901 {
7902 #if HOST_LONG_BITS == 64
7903 #if defined(__NR_futex)
7904     /* The host time_t is always 64-bit here; there is no _time64 variant. */
7905     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7906 
7907 #endif
7908 #else /* HOST_LONG_BITS == 64 */
7909 #if defined(__NR_futex_time64)
7910     if (sizeof(timeout->tv_sec) == 8) {
7911         /* _time64 function on 32bit arch */
7912         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7913     }
7914 #endif
7915 #if defined(__NR_futex)
7916     /* old function on 32bit arch */
7917     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7918 #endif
7919 #endif /* HOST_LONG_BITS == 64 */
7920     g_assert_not_reached();
7921 }
7922 
7923 static int do_safe_futex(int *uaddr, int op, int val,
7924                          const struct timespec *timeout, int *uaddr2,
7925                          int val3)
7926 {
7927 #if HOST_LONG_BITS == 64
7928 #if defined(__NR_futex)
7929     /* The host time_t is always 64-bit here; there is no _time64 variant. */
7930     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7931 #endif
7932 #else /* HOST_LONG_BITS == 64 */
7933 #if defined(__NR_futex_time64)
7934     if (sizeof(timeout->tv_sec) == 8) {
7935         /* _time64 function on 32bit arch */
7936         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7937                                            val3));
7938     }
7939 #endif
7940 #if defined(__NR_futex)
7941     /* old function on 32bit arch */
7942     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7943 #endif
7944 #endif /* HOST_LONG_BITS == 64 */
7945     return -TARGET_ENOSYS;
7946 }
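/*
 * Roughly: a 64-bit host only ever has __NR_futex (time_t is already
 * 64 bits there), while a 32-bit host with a 64-bit time_t (so that
 * sizeof(timeout->tv_sec) == 8) needs __NR_futex_time64 and otherwise
 * falls back to the classic __NR_futex.
 */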
7947 
7948 /* ??? Using host futex calls even when target atomic operations
7949    are not really atomic probably breaks things.  However, implementing
7950    futexes locally would make futexes shared between multiple processes
7951    tricky.  Then again, they're probably useless anyway, because guest
7952    atomic operations won't work either.  */
7953 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7954 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7955                     int op, int val, target_ulong timeout,
7956                     target_ulong uaddr2, int val3)
7957 {
7958     struct timespec ts, *pts = NULL;
7959     void *haddr2 = NULL;
7960     int base_op;
7961 
7962     /* We assume FUTEX_* constants are the same on both host and target. */
7963 #ifdef FUTEX_CMD_MASK
7964     base_op = op & FUTEX_CMD_MASK;
7965 #else
7966     base_op = op;
7967 #endif
7968     switch (base_op) {
7969     case FUTEX_WAIT:
7970     case FUTEX_WAIT_BITSET:
7971         val = tswap32(val);
7972         break;
7973     case FUTEX_WAIT_REQUEUE_PI:
7974         val = tswap32(val);
7975         haddr2 = g2h(cpu, uaddr2);
7976         break;
7977     case FUTEX_LOCK_PI:
7978     case FUTEX_LOCK_PI2:
7979         break;
7980     case FUTEX_WAKE:
7981     case FUTEX_WAKE_BITSET:
7982     case FUTEX_TRYLOCK_PI:
7983     case FUTEX_UNLOCK_PI:
7984         timeout = 0;
7985         break;
7986     case FUTEX_FD:
7987         val = target_to_host_signal(val);
7988         timeout = 0;
7989         break;
7990     case FUTEX_CMP_REQUEUE:
7991     case FUTEX_CMP_REQUEUE_PI:
7992         val3 = tswap32(val3);
7993         /* fall through */
7994     case FUTEX_REQUEUE:
7995     case FUTEX_WAKE_OP:
7996         /*
7997          * For these, the 4th argument is not TIMEOUT, but VAL2.
7998          * But the prototype of do_safe_futex takes a pointer, so
7999          * insert casts to satisfy the compiler.  We do not need
8000          * to tswap VAL2 since it's not compared to guest memory.
8001          */
8002         pts = (struct timespec *)(uintptr_t)timeout;
8003         timeout = 0;
8004         haddr2 = g2h(cpu, uaddr2);
8005         break;
8006     default:
8007         return -TARGET_ENOSYS;
8008     }
8009     if (timeout) {
8010         pts = &ts;
8011         if (time64
8012             ? target_to_host_timespec64(pts, timeout)
8013             : target_to_host_timespec(pts, timeout)) {
8014             return -TARGET_EFAULT;
8015         }
8016     }
8017     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
8018 }
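/*
 * e.g. for FUTEX_WAKE_OP the guest's fourth argument is really VAL2 (a
 * count, not a timespec pointer), so above it is smuggled through the
 * "timeout" parameter of do_safe_futex and the timespec conversion is
 * skipped.
 */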
8019 #endif
8020 
8021 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8022 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
8023                                      abi_long handle, abi_long mount_id,
8024                                      abi_long flags)
8025 {
8026     struct file_handle *target_fh;
8027     struct file_handle *fh;
8028     int mid = 0;
8029     abi_long ret;
8030     char *name;
8031     unsigned int size, total_size;
8032 
8033     if (get_user_s32(size, handle)) {
8034         return -TARGET_EFAULT;
8035     }
8036 
8037     name = lock_user_string(pathname);
8038     if (!name) {
8039         return -TARGET_EFAULT;
8040     }
8041 
8042     total_size = sizeof(struct file_handle) + size;
8043     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
8044     if (!target_fh) {
8045         unlock_user(name, pathname, 0);
8046         return -TARGET_EFAULT;
8047     }
8048 
8049     fh = g_malloc0(total_size);
8050     fh->handle_bytes = size;
8051 
8052     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
8053     unlock_user(name, pathname, 0);
8054 
8055     /* man name_to_handle_at(2):
8056      * Other than the use of the handle_bytes field, the caller should treat
8057      * the file_handle structure as an opaque data type
8058      */
8059 
8060     memcpy(target_fh, fh, total_size);
8061     target_fh->handle_bytes = tswap32(fh->handle_bytes);
8062     target_fh->handle_type = tswap32(fh->handle_type);
8063     g_free(fh);
8064     unlock_user(target_fh, handle, total_size);
8065 
8066     if (put_user_s32(mid, mount_id)) {
8067         return -TARGET_EFAULT;
8068     }
8069 
8070     return ret;
8071 
8072 }
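/*
 * Note that only handle_bytes and handle_type are byteswapped for the
 * guest; the opaque f_handle payload itself is copied verbatim.
 */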
8073 #endif
8074 
8075 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8076 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
8077                                      abi_long flags)
8078 {
8079     struct file_handle *target_fh;
8080     struct file_handle *fh;
8081     unsigned int size, total_size;
8082     abi_long ret;
8083 
8084     if (get_user_s32(size, handle)) {
8085         return -TARGET_EFAULT;
8086     }
8087 
8088     total_size = sizeof(struct file_handle) + size;
8089     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
8090     if (!target_fh) {
8091         return -TARGET_EFAULT;
8092     }
8093 
8094     fh = g_memdup(target_fh, total_size);
8095     fh->handle_bytes = size;
8096     fh->handle_type = tswap32(target_fh->handle_type);
8097 
8098     ret = get_errno(open_by_handle_at(mount_fd, fh,
8099                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
8100 
8101     g_free(fh);
8102 
8103     unlock_user(target_fh, handle, total_size);
8104 
8105     return ret;
8106 }
8107 #endif
8108 
8109 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
8110 
8111 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
8112 {
8113     int host_flags;
8114     target_sigset_t *target_mask;
8115     sigset_t host_mask;
8116     abi_long ret;
8117 
8118     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
8119         return -TARGET_EINVAL;
8120     }
8121     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
8122         return -TARGET_EFAULT;
8123     }
8124 
8125     target_to_host_sigset(&host_mask, target_mask);
8126 
8127     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8128 
8129     ret = get_errno(signalfd(fd, &host_mask, host_flags));
8130     if (ret >= 0) {
8131         fd_trans_register(ret, &target_signalfd_trans);
8132     }
8133 
8134     unlock_user_struct(target_mask, mask, 0);
8135 
8136     return ret;
8137 }
8138 #endif
8139 
8140 /* Map host to target signal numbers for the wait family of syscalls.
8141    Assume all other status bits are the same.  */
8142 int host_to_target_waitstatus(int status)
8143 {
8144     if (WIFSIGNALED(status)) {
8145         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8146     }
8147     if (WIFSTOPPED(status)) {
8148         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8149                | (status & 0xff);
8150     }
8151     return status;
8152 }
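/*
 * e.g. a child killed by a signal carries that signal's number in the
 * low 7 bits, so only the WTERMSIG()/WSTOPSIG() fields need
 * renumbering; the core-dump bit and the exit-status byte pass through
 * unchanged.
 */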
8153 
8154 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8155 {
8156     CPUState *cpu = env_cpu(cpu_env);
8157     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
8158     int i;
8159 
8160     for (i = 0; i < bprm->argc; i++) {
8161         size_t len = strlen(bprm->argv[i]) + 1;
8162 
8163         if (write(fd, bprm->argv[i], len) != len) {
8164             return -1;
8165         }
8166     }
8167 
8168     return 0;
8169 }
8170 
8171 struct open_self_maps_data {
8172     TaskState *ts;
8173     IntervalTreeRoot *host_maps;
8174     int fd;
8175     bool smaps;
8176 };
8177 
8178 /*
8179  * Subroutine to output one line of /proc/self/maps,
8180  * or one region of /proc/self/smaps.
8181  */
8182 
8183 #ifdef TARGET_HPPA
8184 # define test_stack(S, E, L)  (E == L)
8185 #else
8186 # define test_stack(S, E, L)  (S == L)
8187 #endif
8188 
8189 static void open_self_maps_4(const struct open_self_maps_data *d,
8190                              const MapInfo *mi, abi_ptr start,
8191                              abi_ptr end, unsigned flags)
8192 {
8193     const struct image_info *info = d->ts->info;
8194     const char *path = mi->path;
8195     uint64_t offset;
8196     int fd = d->fd;
8197     int count;
8198 
8199     if (test_stack(start, end, info->stack_limit)) {
8200         path = "[stack]";
8201     } else if (start == info->brk) {
8202         path = "[heap]";
8203     } else if (start == info->vdso) {
8204         path = "[vdso]";
8205 #ifdef TARGET_X86_64
8206     } else if (start == TARGET_VSYSCALL_PAGE) {
8207         path = "[vsyscall]";
8208 #endif
8209     }
8210 
8211     /* Except for the null device (MAP_ANON), adjust the fragment offset. */
8212     offset = mi->offset;
8213     if (mi->dev) {
8214         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8215         offset += hstart - mi->itree.start;
8216     }
8217 
8218     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8219                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8220                     start, end,
8221                     (flags & PAGE_READ) ? 'r' : '-',
8222                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8223                     (flags & PAGE_EXEC) ? 'x' : '-',
8224                     mi->is_priv ? 'p' : 's',
8225                     offset, major(mi->dev), minor(mi->dev),
8226                     (uint64_t)mi->inode);
8227     if (path) {
8228         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8229     } else {
8230         dprintf(fd, "\n");
8231     }
8232 
8233     if (d->smaps) {
8234         unsigned long size = end - start;
8235         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8236         unsigned long size_kb = size >> 10;
8237 
8238         dprintf(fd, "Size:                  %lu kB\n"
8239                 "KernelPageSize:        %lu kB\n"
8240                 "MMUPageSize:           %lu kB\n"
8241                 "Rss:                   0 kB\n"
8242                 "Pss:                   0 kB\n"
8243                 "Pss_Dirty:             0 kB\n"
8244                 "Shared_Clean:          0 kB\n"
8245                 "Shared_Dirty:          0 kB\n"
8246                 "Private_Clean:         0 kB\n"
8247                 "Private_Dirty:         0 kB\n"
8248                 "Referenced:            0 kB\n"
8249                 "Anonymous:             %lu kB\n"
8250                 "LazyFree:              0 kB\n"
8251                 "AnonHugePages:         0 kB\n"
8252                 "ShmemPmdMapped:        0 kB\n"
8253                 "FilePmdMapped:         0 kB\n"
8254                 "Shared_Hugetlb:        0 kB\n"
8255                 "Private_Hugetlb:       0 kB\n"
8256                 "Swap:                  0 kB\n"
8257                 "SwapPss:               0 kB\n"
8258                 "Locked:                0 kB\n"
8259                 "THPeligible:    0\n"
8260                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8261                 size_kb, page_size_kb, page_size_kb,
8262                 (flags & PAGE_ANON ? size_kb : 0),
8263                 (flags & PAGE_READ) ? " rd" : "",
8264                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8265                 (flags & PAGE_EXEC) ? " ex" : "",
8266                 mi->is_priv ? "" : " sh",
8267                 (flags & PAGE_READ) ? " mr" : "",
8268                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8269                 (flags & PAGE_EXEC) ? " me" : "",
8270                 mi->is_priv ? "" : " ms");
8271     }
8272 }
8273 
8274 /*
8275  * Callback for walk_memory_regions, when read_self_maps() fails.
8276  * Proceed without the benefit of host /proc/self/maps cross-check.
8277  */
8278 static int open_self_maps_3(void *opaque, vaddr guest_start,
8279                             vaddr guest_end, int flags)
8280 {
8281     static const MapInfo mi = { .is_priv = true };
8282 
8283     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8284     return 0;
8285 }
8286 
8287 /*
8288  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8289  */
8290 static int open_self_maps_2(void *opaque, vaddr guest_start,
8291                             vaddr guest_end, int flags)
8292 {
8293     const struct open_self_maps_data *d = opaque;
8294     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8295     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8296 
8297 #ifdef TARGET_X86_64
8298     /*
8299      * Because of the extremely high position of the page within the guest
8300      * virtual address space, this is not backed by host memory at all.
8301      * Therefore the loop below would fail.  This is the only instance
8302      * of not having host backing memory.
8303      */
8304     if (guest_start == TARGET_VSYSCALL_PAGE) {
8305         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8306     }
8307 #endif
8308 
8309     while (1) {
8310         IntervalTreeNode *n =
8311             interval_tree_iter_first(d->host_maps, host_start, host_start);
8312         MapInfo *mi = container_of(n, MapInfo, itree);
8313         uintptr_t this_hlast = MIN(host_last, n->last);
8314         target_ulong this_gend = h2g(this_hlast) + 1;
8315 
8316         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8317 
8318         if (this_hlast == host_last) {
8319             return 0;
8320         }
8321         host_start = this_hlast + 1;
8322         guest_start = h2g(host_start);
8323     }
8324 }
8325 
8326 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8327 {
8328     struct open_self_maps_data d = {
8329         .ts = get_task_state(env_cpu(env)),
8330         .fd = fd,
8331         .smaps = smaps
8332     };
8333 
8334     mmap_lock();
8335     d.host_maps = read_self_maps();
8336     if (d.host_maps) {
8337         walk_memory_regions(&d, open_self_maps_2);
8338         free_self_maps(d.host_maps);
8339     } else {
8340         walk_memory_regions(&d, open_self_maps_3);
8341     }
8342     mmap_unlock();
8343     return 0;
8344 }
8345 
8346 static int open_self_maps(CPUArchState *cpu_env, int fd)
8347 {
8348     return open_self_maps_1(cpu_env, fd, false);
8349 }
8350 
8351 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8352 {
8353     return open_self_maps_1(cpu_env, fd, true);
8354 }
8355 
8356 static int open_self_stat(CPUArchState *cpu_env, int fd)
8357 {
8358     CPUState *cpu = env_cpu(cpu_env);
8359     TaskState *ts = get_task_state(cpu);
8360     g_autoptr(GString) buf = g_string_new(NULL);
8361     int i;
8362 
8363     for (i = 0; i < 44; i++) {
8364         if (i == 0) {
8365             /* pid */
8366             g_string_printf(buf, FMT_pid " ", getpid());
8367         } else if (i == 1) {
8368             /* app name */
8369             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8370             bin = bin ? bin + 1 : ts->bprm->argv[0];
8371             g_string_printf(buf, "(%.15s) ", bin);
8372         } else if (i == 2) {
8373             /* task state */
8374             g_string_assign(buf, "R "); /* we are running right now */
8375         } else if (i == 3) {
8376             /* ppid */
8377             g_string_printf(buf, FMT_pid " ", getppid());
8378         } else if (i == 4) {
8379             /* pgid */
8380             g_string_printf(buf, FMT_pid " ", getpgrp());
8381         } else if (i == 19) {
8382             /* num_threads */
8383             int cpus = 0;
8384             WITH_RCU_READ_LOCK_GUARD() {
8385                 CPUState *cpu_iter;
8386                 CPU_FOREACH(cpu_iter) {
8387                     cpus++;
8388                 }
8389             }
8390             g_string_printf(buf, "%d ", cpus);
8391         } else if (i == 21) {
8392             /* starttime */
8393             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8394         } else if (i == 27) {
8395             /* stack bottom */
8396             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8397         } else {
8398             /* the remaining fields are not emulated; report 0 */
8399             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8400         }
8401 
8402         if (write(fd, buf->str, buf->len) != buf->len) {
8403             return -1;
8404         }
8405     }
8406 
8407     return 0;
8408 }
8409 
8410 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8411 {
8412     CPUState *cpu = env_cpu(cpu_env);
8413     TaskState *ts = get_task_state(cpu);
8414     abi_ulong auxv = ts->info->saved_auxv;
8415     abi_ulong len = ts->info->auxv_len;
8416     char *ptr;
8417 
8418     /*
8419      * The auxiliary vector is stored on the target process's stack;
8420      * read the whole auxv vector and copy it to the file.
8421      */
8422     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8423     if (ptr != NULL) {
8424         while (len > 0) {
8425             ssize_t r;
8426             r = write(fd, ptr, len);
8427             if (r <= 0) {
8428                 break;
8429             }
8430             len -= r;
8431             ptr += r;
8432         }
8433         lseek(fd, 0, SEEK_SET);
8434         unlock_user(ptr, auxv, len);
8435     }
8436 
8437     return 0;
8438 }
8439 
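/*
 * Returns nonzero if FILENAME names ENTRY in our own /proc directory,
 * i.e. "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY".
 */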
8440 static int is_proc_myself(const char *filename, const char *entry)
8441 {
8442     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8443         filename += strlen("/proc/");
8444         if (!strncmp(filename, "self/", strlen("self/"))) {
8445             filename += strlen("self/");
8446         } else if (*filename >= '1' && *filename <= '9') {
8447             char myself[80];
8448             snprintf(myself, sizeof(myself), "%d/", getpid());
8449             if (!strncmp(filename, myself, strlen(myself))) {
8450                 filename += strlen(myself);
8451             } else {
8452                 return 0;
8453             }
8454         } else {
8455             return 0;
8456         }
8457         if (!strcmp(filename, entry)) {
8458             return 1;
8459         }
8460     }
8461     return 0;
8462 }
8463 
8464 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8465                       const char *fmt, int code)
8466 {
8467     if (logfile) {
8468         CPUState *cs = env_cpu(env);
8469 
8470         fprintf(logfile, fmt, code);
8471         fprintf(logfile, "Failing executable: %s\n", exec_path);
8472         cpu_dump_state(cs, logfile, 0);
8473         open_self_maps(env, fileno(logfile));
8474     }
8475 }
8476 
8477 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8478 {
8479     /* dump to console */
8480     excp_dump_file(stderr, env, fmt, code);
8481 
8482     /* dump to log file */
8483     if (qemu_log_separate()) {
8484         FILE *logfile = qemu_log_trylock();
8485 
8486         excp_dump_file(logfile, env, fmt, code);
8487         qemu_log_unlock(logfile);
8488     }
8489 }
8490 
8491 #include "target_proc.h"
8492 
8493 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8494     defined(HAVE_ARCH_PROC_CPUINFO) || \
8495     defined(HAVE_ARCH_PROC_HARDWARE)
8496 static int is_proc(const char *filename, const char *entry)
8497 {
8498     return strcmp(filename, entry) == 0;
8499 }
8500 #endif
8501 
8502 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8503 static int open_net_route(CPUArchState *cpu_env, int fd)
8504 {
8505     FILE *fp;
8506     char *line = NULL;
8507     size_t len = 0;
8508     ssize_t read;
8509 
8510     fp = fopen("/proc/net/route", "r");
8511     if (fp == NULL) {
8512         return -1;
8513     }
8514 
8515     /* read header */
8516 
8517     read = getline(&line, &len, fp);
8518     dprintf(fd, "%s", line);
8519 
8520     /* read routes */
8521 
8522     while ((read = getline(&line, &len, fp)) != -1) {
8523         char iface[16];
8524         uint32_t dest, gw, mask;
8525         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8526         int fields;
8527 
8528         fields = sscanf(line,
8529                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8530                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8531                         &mask, &mtu, &window, &irtt);
8532         if (fields != 11) {
8533             continue;
8534         }
8535         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8536                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8537                 metric, tswap32(mask), mtu, window, irtt);
8538     }
8539 
8540     free(line);
8541     fclose(fp);
8542 
8543     return 0;
8544 }
8545 #endif
8546 
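/*
 * Returns a file descriptor (>= 0) when the path is emulated here,
 * -1 with errno set on error, or -2 when the path is not specially
 * handled and the caller should perform a real open.
 */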
8547 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8548                               const char *fname, int flags, mode_t mode,
8549                               int openat2_resolve, bool safe)
8550 {
8551     g_autofree char *proc_name = NULL;
8552     const char *pathname;
8553     struct fake_open {
8554         const char *filename;
8555         int (*fill)(CPUArchState *cpu_env, int fd);
8556         int (*cmp)(const char *s1, const char *s2);
8557     };
8558     const struct fake_open *fake_open;
8559     static const struct fake_open fakes[] = {
8560         { "maps", open_self_maps, is_proc_myself },
8561         { "smaps", open_self_smaps, is_proc_myself },
8562         { "stat", open_self_stat, is_proc_myself },
8563         { "auxv", open_self_auxv, is_proc_myself },
8564         { "cmdline", open_self_cmdline, is_proc_myself },
8565 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8566         { "/proc/net/route", open_net_route, is_proc },
8567 #endif
8568 #if defined(HAVE_ARCH_PROC_CPUINFO)
8569         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8570 #endif
8571 #if defined(HAVE_ARCH_PROC_HARDWARE)
8572         { "/proc/hardware", open_hardware, is_proc },
8573 #endif
8574         { NULL, NULL, NULL }
8575     };
8576 
8577     /* if this is a file in the /proc/ filesystem, expand its full name */
8578     proc_name = realpath(fname, NULL);
8579     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8580         pathname = proc_name;
8581     } else {
8582         pathname = fname;
8583     }
8584 
8585     if (is_proc_myself(pathname, "exe")) {
8586         /* Honor openat2 resolve flags */
8587         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8588             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8589             errno = ELOOP;
8590             return -1;
8591         }
8592         if (safe) {
8593             return safe_openat(dirfd, exec_path, flags, mode);
8594         } else {
8595             return openat(dirfd, exec_path, flags, mode);
8596         }
8597     }
8598 
8599     for (fake_open = fakes; fake_open->filename; fake_open++) {
8600         if (fake_open->cmp(pathname, fake_open->filename)) {
8601             break;
8602         }
8603     }
8604 
8605     if (fake_open->filename) {
8606         const char *tmpdir;
8607         char filename[PATH_MAX];
8608         int fd, r;
8609 
8610         fd = memfd_create("qemu-open", 0);
8611         if (fd < 0) {
8612             if (errno != ENOSYS) {
8613                 return fd;
8614             }
8615             /* fall back to a temporary file to hold the generated contents */
8616             tmpdir = getenv("TMPDIR");
8617             if (!tmpdir)
8618                 tmpdir = "/tmp";
8619             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8620             fd = mkstemp(filename);
8621             if (fd < 0) {
8622                 return fd;
8623             }
8624             unlink(filename);
8625         }
8626 
8627         if ((r = fake_open->fill(cpu_env, fd))) {
8628             int e = errno;
8629             close(fd);
8630             errno = e;
8631             return r;
8632         }
8633         lseek(fd, 0, SEEK_SET);
8634 
8635         return fd;
8636     }
8637 
8638     return -2;
8639 }
8640 
8641 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8642                     int flags, mode_t mode, bool safe)
8643 {
8644     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8645     if (fd > -2) {
8646         return fd;
8647     }
8648 
8649     if (safe) {
8650         return safe_openat(dirfd, path(pathname), flags, mode);
8651     } else {
8652         return openat(dirfd, path(pathname), flags, mode);
8653     }
8654 }
8655 
8656 
8657 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8658                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8659                       abi_ulong guest_size)
8660 {
8661     struct open_how_ver0 how = {0};
8662     char *pathname;
8663     int ret;
8664 
8665     if (guest_size < sizeof(struct target_open_how_ver0)) {
8666         return -TARGET_EINVAL;
8667     }
8668     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8669     if (ret) {
8670         if (ret == -TARGET_E2BIG) {
8671             qemu_log_mask(LOG_UNIMP,
8672                           "Unimplemented openat2 open_how size: "
8673                           TARGET_ABI_FMT_lu "\n", guest_size);
8674         }
8675         return ret;
8676     }
8677     pathname = lock_user_string(guest_pathname);
8678     if (!pathname) {
8679         return -TARGET_EFAULT;
8680     }
8681 
8682     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8683     how.mode = tswap64(how.mode);
8684     how.resolve = tswap64(how.resolve);
8685     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8686                                 how.resolve, true);
8687     if (fd > -2) {
8688         ret = get_errno(fd);
8689     } else {
8690         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8691                                      sizeof(struct open_how_ver0)));
8692     }
8693 
8694     fd_trans_unregister(ret);
8695     unlock_user(pathname, guest_pathname, 0);
8696     return ret;
8697 }
8698 
8699 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8700 {
8701     ssize_t ret;
8702 
8703     if (!pathname || !buf) {
8704         errno = EFAULT;
8705         return -1;
8706     }
8707 
8708     if (!bufsiz) {
8709         /* Short circuit this for the magic exe check. */
8710         errno = EINVAL;
8711         return -1;
8712     }
8713 
8714     if (is_proc_myself((const char *)pathname, "exe")) {
8715         /*
8716          * Don't worry about sign mismatch as earlier mapping
8717          * logic would have thrown a bad address error.
8718          */
8719         ret = MIN(strlen(exec_path), bufsiz);
8720         /* We cannot NUL terminate the string. */
8721         memcpy(buf, exec_path, ret);
8722     } else {
8723         ret = readlink(path(pathname), buf, bufsiz);
8724     }
8725 
8726     return ret;
8727 }
8728 
8729 static int do_execv(CPUArchState *cpu_env, int dirfd,
8730                     abi_long pathname, abi_long guest_argp,
8731                     abi_long guest_envp, int flags, bool is_execveat)
8732 {
8733     int ret;
8734     char **argp, **envp;
8735     int argc, envc;
8736     abi_ulong gp;
8737     abi_ulong addr;
8738     char **q;
8739     void *p;
8740 
8741     argc = 0;
8742 
8743     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8744         if (get_user_ual(addr, gp)) {
8745             return -TARGET_EFAULT;
8746         }
8747         if (!addr) {
8748             break;
8749         }
8750         argc++;
8751     }
8752     envc = 0;
8753     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8754         if (get_user_ual(addr, gp)) {
8755             return -TARGET_EFAULT;
8756         }
8757         if (!addr) {
8758             break;
8759         }
8760         envc++;
8761     }
8762 
8763     argp = g_new0(char *, argc + 1);
8764     envp = g_new0(char *, envc + 1);
8765 
8766     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8767         if (get_user_ual(addr, gp)) {
8768             goto execve_efault;
8769         }
8770         if (!addr) {
8771             break;
8772         }
8773         *q = lock_user_string(addr);
8774         if (!*q) {
8775             goto execve_efault;
8776         }
8777     }
8778     *q = NULL;
8779 
8780     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8781         if (get_user_ual(addr, gp)) {
8782             goto execve_efault;
8783         }
8784         if (!addr) {
8785             break;
8786         }
8787         *q = lock_user_string(addr);
8788         if (!*q) {
8789             goto execve_efault;
8790         }
8791     }
8792     *q = NULL;
8793 
8794     /*
8795      * Although execve() is not an interruptible syscall it is
8796      * a special case where we must use the safe_syscall wrapper:
8797      * if we allow a signal to happen before we make the host
8798      * syscall then we will 'lose' it, because at the point of
8799      * execve the process leaves QEMU's control. So we use the
8800      * safe syscall wrapper to ensure that we either take the
8801      * signal as a guest signal, or else it does not happen
8802      * before the execve completes and makes it the other
8803      * program's problem.
8804      */
8805     p = lock_user_string(pathname);
8806     if (!p) {
8807         goto execve_efault;
8808     }
8809 
8810     const char *exe = p;
8811     if (is_proc_myself(p, "exe")) {
8812         exe = exec_path;
8813     }
8814     ret = is_execveat
8815         ? safe_execveat(dirfd, exe, argp, envp, flags)
8816         : safe_execve(exe, argp, envp);
8817     ret = get_errno(ret);
8818 
8819     unlock_user(p, pathname, 0);
8820 
8821     goto execve_end;
8822 
8823 execve_efault:
8824     ret = -TARGET_EFAULT;
8825 
8826 execve_end:
8827     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8828         if (get_user_ual(addr, gp) || !addr) {
8829             break;
8830         }
8831         unlock_user(*q, addr, 0);
8832     }
8833     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8834         if (get_user_ual(addr, gp) || !addr) {
8835             break;
8836         }
8837         unlock_user(*q, addr, 0);
8838     }
8839 
8840     g_free(argp);
8841     g_free(envp);
8842     return ret;
8843 }
8844 
8845 #define TIMER_MAGIC 0x0caf0000
8846 #define TIMER_MAGIC_MASK 0xffff0000
8847 
8848 /* Convert a QEMU-provided timer ID back to the internal 16-bit index */
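/*
 * e.g. 0x0caf0002 refers to g_posix_timers[2]; any value without the
 * TIMER_MAGIC pattern in its upper 16 bits is rejected as -TARGET_EINVAL.
 */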
8849 static target_timer_t get_timer_id(abi_long arg)
8850 {
8851     target_timer_t timerid = arg;
8852 
8853     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8854         return -TARGET_EINVAL;
8855     }
8856 
8857     timerid &= 0xffff;
8858 
8859     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8860         return -TARGET_EINVAL;
8861     }
8862 
8863     return timerid;
8864 }
8865 
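/*
 * The two helpers below repack the CPU affinity bitmap between the
 * guest's abi_ulong-sized words and the host's unsigned long words:
 * e.g. for a 32-bit guest on a 64-bit host, guest word 1 bit 1
 * (CPU 33) maps to host word 0 bit 33, and vice versa.
 */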
8866 static int target_to_host_cpu_mask(unsigned long *host_mask,
8867                                    size_t host_size,
8868                                    abi_ulong target_addr,
8869                                    size_t target_size)
8870 {
8871     unsigned target_bits = sizeof(abi_ulong) * 8;
8872     unsigned host_bits = sizeof(*host_mask) * 8;
8873     abi_ulong *target_mask;
8874     unsigned i, j;
8875 
8876     assert(host_size >= target_size);
8877 
8878     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8879     if (!target_mask) {
8880         return -TARGET_EFAULT;
8881     }
8882     memset(host_mask, 0, host_size);
8883 
8884     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8885         unsigned bit = i * target_bits;
8886         abi_ulong val;
8887 
8888         __get_user(val, &target_mask[i]);
8889         for (j = 0; j < target_bits; j++, bit++) {
8890             if (val & (1UL << j)) {
8891                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8892             }
8893         }
8894     }
8895 
8896     unlock_user(target_mask, target_addr, 0);
8897     return 0;
8898 }
8899 
8900 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8901                                    size_t host_size,
8902                                    abi_ulong target_addr,
8903                                    size_t target_size)
8904 {
8905     unsigned target_bits = sizeof(abi_ulong) * 8;
8906     unsigned host_bits = sizeof(*host_mask) * 8;
8907     abi_ulong *target_mask;
8908     unsigned i, j;
8909 
8910     assert(host_size >= target_size);
8911 
8912     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8913     if (!target_mask) {
8914         return -TARGET_EFAULT;
8915     }
8916 
8917     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8918         unsigned bit = i * target_bits;
8919         abi_ulong val = 0;
8920 
8921         for (j = 0; j < target_bits; j++, bit++) {
8922             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8923                 val |= 1UL << j;
8924             }
8925         }
8926         __put_user(val, &target_mask[i]);
8927     }
8928 
8929     unlock_user(target_mask, target_addr, target_size);
8930     return 0;
8931 }
8932 
8933 #ifdef TARGET_NR_getdents
8934 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8935 {
8936     g_autofree void *hdirp = NULL;
8937     void *tdirp;
8938     int hlen, hoff, toff;
8939     int hreclen, treclen;
8940     off_t prev_diroff = 0;
8941 
8942     hdirp = g_try_malloc(count);
8943     if (!hdirp) {
8944         return -TARGET_ENOMEM;
8945     }
8946 
8947 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8948     hlen = sys_getdents(dirfd, hdirp, count);
8949 #else
8950     hlen = sys_getdents64(dirfd, hdirp, count);
8951 #endif
8952 
8953     hlen = get_errno(hlen);
8954     if (is_error(hlen)) {
8955         return hlen;
8956     }
8957 
8958     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8959     if (!tdirp) {
8960         return -TARGET_EFAULT;
8961     }
8962 
8963     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8964 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8965         struct linux_dirent *hde = hdirp + hoff;
8966 #else
8967         struct linux_dirent64 *hde = hdirp + hoff;
8968 #endif
8969         struct target_dirent *tde = tdirp + toff;
8970         int namelen;
8971         uint8_t type;
8972 
8973         namelen = strlen(hde->d_name);
8974         hreclen = hde->d_reclen;
8975         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8976         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8977 
8978         if (toff + treclen > count) {
8979             /*
8980              * If the host struct is smaller than the target struct, or
8981              * requires less alignment and thus packs into less space,
8982              * then the host can return more entries than we can pass
8983              * on to the guest.
8984              */
8985             if (toff == 0) {
8986                 toff = -TARGET_EINVAL; /* result buffer is too small */
8987                 break;
8988             }
8989             /*
8990              * Return what we have, resetting the file pointer to the
8991              * location of the first record not returned.
8992              */
8993             lseek(dirfd, prev_diroff, SEEK_SET);
8994             break;
8995         }
8996 
8997         prev_diroff = hde->d_off;
8998         tde->d_ino = tswapal(hde->d_ino);
8999         tde->d_off = tswapal(hde->d_off);
9000         tde->d_reclen = tswap16(treclen);
9001         memcpy(tde->d_name, hde->d_name, namelen + 1);
9002 
9003         /*
9004          * The getdents type is in what was formerly a padding byte at the
9005          * end of the structure.
9006          */
9007 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9008         type = *((uint8_t *)hde + hreclen - 1);
9009 #else
9010         type = hde->d_type;
9011 #endif
9012         *((uint8_t *)tde + treclen - 1) = type;
9013     }
9014 
9015     unlock_user(tdirp, arg2, toff);
9016     return toff;
9017 }
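/*
 * The target record built above uses the classic getdents layout:
 * d_ino, d_off, d_reclen, the NUL-terminated name, and the d_type byte
 * stored in the record's final byte -- hence the "+ 2" (NUL plus type
 * byte) when computing treclen.
 */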
9018 #endif /* TARGET_NR_getdents */
9019 
9020 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9021 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
9022 {
9023     g_autofree void *hdirp = NULL;
9024     void *tdirp;
9025     int hlen, hoff, toff;
9026     int hreclen, treclen;
9027     off_t prev_diroff = 0;
9028 
9029     hdirp = g_try_malloc(count);
9030     if (!hdirp) {
9031         return -TARGET_ENOMEM;
9032     }
9033 
9034     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
9035     if (is_error(hlen)) {
9036         return hlen;
9037     }
9038 
9039     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9040     if (!tdirp) {
9041         return -TARGET_EFAULT;
9042     }
9043 
9044     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
9045         struct linux_dirent64 *hde = hdirp + hoff;
9046         struct target_dirent64 *tde = tdirp + toff;
9047         int namelen;
9048 
9049         namelen = strlen(hde->d_name) + 1;
9050         hreclen = hde->d_reclen;
9051         treclen = offsetof(struct target_dirent64, d_name) + namelen;
9052         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
9053 
9054         if (toff + treclen > count) {
9055             /*
9056              * If the host struct is smaller than the target struct, or
9057              * requires less alignment and thus packs into less space,
9058              * then the host can return more entries than we can pass
9059              * on to the guest.
9060              */
9061             if (toff == 0) {
9062                 toff = -TARGET_EINVAL; /* result buffer is too small */
9063                 break;
9064             }
9065             /*
9066              * Return what we have, resetting the file pointer to the
9067              * location of the first record not returned.
9068              */
9069             lseek(dirfd, prev_diroff, SEEK_SET);
9070             break;
9071         }
9072 
9073         prev_diroff = hde->d_off;
9074         tde->d_ino = tswap64(hde->d_ino);
9075         tde->d_off = tswap64(hde->d_off);
9076         tde->d_reclen = tswap16(treclen);
9077         tde->d_type = hde->d_type;
9078         memcpy(tde->d_name, hde->d_name, namelen);
9079     }
9080 
9081     unlock_user(tdirp, arg2, toff);
9082     return toff;
9083 }
9084 #endif /* TARGET_NR_getdents64 */
9085 
9086 #if defined(TARGET_NR_riscv_hwprobe)
9087 
9088 #define RISCV_HWPROBE_KEY_MVENDORID     0
9089 #define RISCV_HWPROBE_KEY_MARCHID       1
9090 #define RISCV_HWPROBE_KEY_MIMPID        2
9091 
9092 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
9093 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
9094 
9095 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
9096 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
9097 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
9098 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
9099 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
9100 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
9101 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
9102 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
9103 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
9104 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
9105 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
9106 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
9107 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
9108 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
9109 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
9110 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
9111 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
9112 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
9113 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
9114 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
9115 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
9116 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
9117 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
9118 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
9119 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
9120 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
9121 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
9122 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
9123 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
9124 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
9125 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
9126 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
9127 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
9128 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
9129 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
9130 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
9131 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
9132 #define     RISCV_HWPROBE_EXT_ZIHINTPAUSE   (1ULL << 36)
9133 #define     RISCV_HWPROBE_EXT_ZVE32X        (1ULL << 37)
9134 #define     RISCV_HWPROBE_EXT_ZVE32F        (1ULL << 38)
9135 #define     RISCV_HWPROBE_EXT_ZVE64X        (1ULL << 39)
9136 #define     RISCV_HWPROBE_EXT_ZVE64F        (1ULL << 40)
9137 #define     RISCV_HWPROBE_EXT_ZVE64D        (1ULL << 41)
9138 #define     RISCV_HWPROBE_EXT_ZIMOP         (1ULL << 42)
9139 #define     RISCV_HWPROBE_EXT_ZCA           (1ULL << 43)
9140 #define     RISCV_HWPROBE_EXT_ZCB           (1ULL << 44)
9141 #define     RISCV_HWPROBE_EXT_ZCD           (1ULL << 45)
9142 #define     RISCV_HWPROBE_EXT_ZCF           (1ULL << 46)
9143 #define     RISCV_HWPROBE_EXT_ZCMOP         (1ULL << 47)
9144 #define     RISCV_HWPROBE_EXT_ZAWRS         (1ULL << 48)
9145 #define     RISCV_HWPROBE_EXT_SUPM          (1ULL << 49)
9146 #define     RISCV_HWPROBE_EXT_ZICNTR        (1ULL << 50)
9147 #define     RISCV_HWPROBE_EXT_ZIHPM         (1ULL << 51)
9148 #define     RISCV_HWPROBE_EXT_ZFBFMIN       (1ULL << 52)
9149 #define     RISCV_HWPROBE_EXT_ZVFBFMIN      (1ULL << 53)
9150 #define     RISCV_HWPROBE_EXT_ZVFBFWMA      (1ULL << 54)
9151 #define     RISCV_HWPROBE_EXT_ZICBOM        (1ULL << 55)
9152 #define     RISCV_HWPROBE_EXT_ZAAMO         (1ULL << 56)
9153 #define     RISCV_HWPROBE_EXT_ZALRSC        (1ULL << 57)
9154 #define     RISCV_HWPROBE_EXT_ZABHA         (1ULL << 58)
9155 
9156 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
9157 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
9158 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
9159 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
9160 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
9161 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9162 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
9163 
9164 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
9165 #define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7
9166 #define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8
9167 #define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF        9
9168 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN     0
9169 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED    1
9170 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW        2
9171 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_FAST        3
9172 #define     RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4
9173 #define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10
9174 #define     RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN     0
9175 #define     RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW        2
9176 #define     RISCV_HWPROBE_MISALIGNED_VECTOR_FAST        3
9177 #define     RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4
9178 #define RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0     11
9179 #define RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE      12
9180 #define RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0    13
9181 
9182 struct riscv_hwprobe {
9183     abi_llong  key;
9184     abi_ullong value;
9185 };
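/*
 * Each pair is a query: the guest supplies the key and
 * risc_hwprobe_fill_pairs() below fills in the matching value,
 * following the kernel's RISC-V hwprobe syscall ABI.
 */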
9186 
9187 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9188                                     struct riscv_hwprobe *pair,
9189                                     size_t pair_count)
9190 {
9191     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9192 
9193     for (; pair_count > 0; pair_count--, pair++) {
9194         abi_llong key;
9195         abi_ullong value;
9196         __put_user(0, &pair->value);
9197         __get_user(key, &pair->key);
9198         switch (key) {
9199         case RISCV_HWPROBE_KEY_MVENDORID:
9200             __put_user(cfg->mvendorid, &pair->value);
9201             break;
9202         case RISCV_HWPROBE_KEY_MARCHID:
9203             __put_user(cfg->marchid, &pair->value);
9204             break;
9205         case RISCV_HWPROBE_KEY_MIMPID:
9206             __put_user(cfg->mimpid, &pair->value);
9207             break;
9208         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9209             value = riscv_has_ext(env, RVI) &&
9210                     riscv_has_ext(env, RVM) &&
9211                     riscv_has_ext(env, RVA) ?
9212                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9213             __put_user(value, &pair->value);
9214             break;
9215         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9216             value = riscv_has_ext(env, RVF) &&
9217                     riscv_has_ext(env, RVD) ?
9218                     RISCV_HWPROBE_IMA_FD : 0;
9219             value |= riscv_has_ext(env, RVC) ?
9220                      RISCV_HWPROBE_IMA_C : 0;
9221             value |= riscv_has_ext(env, RVV) ?
9222                      RISCV_HWPROBE_IMA_V : 0;
9223             value |= cfg->ext_zba ?
9224                      RISCV_HWPROBE_EXT_ZBA : 0;
9225             value |= cfg->ext_zbb ?
9226                      RISCV_HWPROBE_EXT_ZBB : 0;
9227             value |= cfg->ext_zbs ?
9228                      RISCV_HWPROBE_EXT_ZBS : 0;
9229             value |= cfg->ext_zicboz ?
9230                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9231             value |= cfg->ext_zbc ?
9232                      RISCV_HWPROBE_EXT_ZBC : 0;
9233             value |= cfg->ext_zbkb ?
9234                      RISCV_HWPROBE_EXT_ZBKB : 0;
9235             value |= cfg->ext_zbkc ?
9236                      RISCV_HWPROBE_EXT_ZBKC : 0;
9237             value |= cfg->ext_zbkx ?
9238                      RISCV_HWPROBE_EXT_ZBKX : 0;
9239             value |= cfg->ext_zknd ?
9240                      RISCV_HWPROBE_EXT_ZKND : 0;
9241             value |= cfg->ext_zkne ?
9242                      RISCV_HWPROBE_EXT_ZKNE : 0;
9243             value |= cfg->ext_zknh ?
9244                      RISCV_HWPROBE_EXT_ZKNH : 0;
9245             value |= cfg->ext_zksed ?
9246                      RISCV_HWPROBE_EXT_ZKSED : 0;
9247             value |= cfg->ext_zksh ?
9248                      RISCV_HWPROBE_EXT_ZKSH : 0;
9249             value |= cfg->ext_zkt ?
9250                      RISCV_HWPROBE_EXT_ZKT : 0;
9251             value |= cfg->ext_zvbb ?
9252                      RISCV_HWPROBE_EXT_ZVBB : 0;
9253             value |= cfg->ext_zvbc ?
9254                      RISCV_HWPROBE_EXT_ZVBC : 0;
9255             value |= cfg->ext_zvkb ?
9256                      RISCV_HWPROBE_EXT_ZVKB : 0;
9257             value |= cfg->ext_zvkg ?
9258                      RISCV_HWPROBE_EXT_ZVKG : 0;
9259             value |= cfg->ext_zvkned ?
9260                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9261             value |= cfg->ext_zvknha ?
9262                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9263             value |= cfg->ext_zvknhb ?
9264                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9265             value |= cfg->ext_zvksed ?
9266                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9267             value |= cfg->ext_zvksh ?
9268                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9269             value |= cfg->ext_zvkt ?
9270                      RISCV_HWPROBE_EXT_ZVKT : 0;
9271             value |= cfg->ext_zfh ?
9272                      RISCV_HWPROBE_EXT_ZFH : 0;
9273             value |= cfg->ext_zfhmin ?
9274                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9275             value |= cfg->ext_zihintntl ?
9276                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9277             value |= cfg->ext_zvfh ?
9278                      RISCV_HWPROBE_EXT_ZVFH : 0;
9279             value |= cfg->ext_zvfhmin ?
9280                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9281             value |= cfg->ext_zfa ?
9282                      RISCV_HWPROBE_EXT_ZFA : 0;
9283             value |= cfg->ext_ztso ?
9284                      RISCV_HWPROBE_EXT_ZTSO : 0;
9285             value |= cfg->ext_zacas ?
9286                      RISCV_HWPROBE_EXT_ZACAS : 0;
9287             value |= cfg->ext_zicond ?
9288                      RISCV_HWPROBE_EXT_ZICOND : 0;
9289             value |= cfg->ext_zihintpause ?
9290                      RISCV_HWPROBE_EXT_ZIHINTPAUSE : 0;
9291             value |= cfg->ext_zve32x ?
9292                      RISCV_HWPROBE_EXT_ZVE32X : 0;
9293             value |= cfg->ext_zve32f ?
9294                      RISCV_HWPROBE_EXT_ZVE32F : 0;
9295             value |= cfg->ext_zve64x ?
9296                      RISCV_HWPROBE_EXT_ZVE64X : 0;
9297             value |= cfg->ext_zve64f ?
9298                      RISCV_HWPROBE_EXT_ZVE64F : 0;
9299             value |= cfg->ext_zve64d ?
9300                      RISCV_HWPROBE_EXT_ZVE64D : 0;
9301             value |= cfg->ext_zimop ?
9302                      RISCV_HWPROBE_EXT_ZIMOP : 0;
9303             value |= cfg->ext_zca ?
9304                      RISCV_HWPROBE_EXT_ZCA : 0;
9305             value |= cfg->ext_zcb ?
9306                      RISCV_HWPROBE_EXT_ZCB : 0;
9307             value |= cfg->ext_zcd ?
9308                      RISCV_HWPROBE_EXT_ZCD : 0;
9309             value |= cfg->ext_zcf ?
9310                      RISCV_HWPROBE_EXT_ZCF : 0;
9311             value |= cfg->ext_zcmop ?
9312                      RISCV_HWPROBE_EXT_ZCMOP : 0;
9313             value |= cfg->ext_zawrs ?
9314                      RISCV_HWPROBE_EXT_ZAWRS : 0;
9315             value |= cfg->ext_supm ?
9316                      RISCV_HWPROBE_EXT_SUPM : 0;
9317             value |= cfg->ext_zicntr ?
9318                      RISCV_HWPROBE_EXT_ZICNTR : 0;
9319             value |= cfg->ext_zihpm ?
9320                      RISCV_HWPROBE_EXT_ZIHPM : 0;
9321             value |= cfg->ext_zfbfmin ?
9322                      RISCV_HWPROBE_EXT_ZFBFMIN : 0;
9323             value |= cfg->ext_zvfbfmin ?
9324                      RISCV_HWPROBE_EXT_ZVFBFMIN : 0;
9325             value |= cfg->ext_zvfbfwma ?
9326                      RISCV_HWPROBE_EXT_ZVFBFWMA : 0;
9327             value |= cfg->ext_zicbom ?
9328                      RISCV_HWPROBE_EXT_ZICBOM : 0;
9329             value |= cfg->ext_zaamo ?
9330                      RISCV_HWPROBE_EXT_ZAAMO : 0;
9331             value |= cfg->ext_zalrsc ?
9332                      RISCV_HWPROBE_EXT_ZALRSC : 0;
9333             value |= cfg->ext_zabha ?
9334                      RISCV_HWPROBE_EXT_ZABHA : 0;
9335             __put_user(value, &pair->value);
9336             break;
9337         case RISCV_HWPROBE_KEY_CPUPERF_0:
9338             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9339             break;
9340         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9341             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9342             __put_user(value, &pair->value);
9343             break;
9344         case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
9345             value = cfg->ext_zicbom ? cfg->cbom_blocksize : 0;
9346             __put_user(value, &pair->value);
9347             break;
9348         default:
9349             __put_user(-1, &pair->key);
9350             break;
9351         }
9352     }
9353 }
9354 
9355 /*
9356  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
9357  * If the cpumask_t has no bits set: -EINVAL.
9358  * Otherwise, the cpumask_t has at least one bit set: 0.
9359  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9360  * nor bound the search by cpumask_size().
9361  */
9362 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9363 {
9364     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9365     int ret = -TARGET_EFAULT;
9366 
9367     if (p) {
9368         ret = -TARGET_EINVAL;
9369         /*
9370          * Since we only care about the empty/non-empty state of the cpumask_t
9371          * not the individual bits, we do not need to repartition the bits
9372          * from target abi_ulong to host unsigned long.
9373          *
9374          * Note that the kernel does not round up cpusetsize to a multiple of
9375          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9376          * it copies exactly cpusetsize bytes into a zeroed buffer.
9377          */
9378         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9379             if (p[i]) {
9380                 ret = 0;
9381                 break;
9382             }
9383         }
9384         unlock_user(p, target_cpus, 0);
9385     }
9386     return ret;
9387 }
9388 
9389 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9390                                  abi_long arg2, abi_long arg3,
9391                                  abi_long arg4, abi_long arg5)
9392 {
9393     int ret;
9394     struct riscv_hwprobe *host_pairs;
9395 
9396     /* flags must be 0 */
9397     if (arg5 != 0) {
9398         return -TARGET_EINVAL;
9399     }
9400 
9401     /* check cpu_set */
9402     if (arg3 != 0) {
9403         ret = nonempty_cpu_set(arg3, arg4);
9404         if (ret != 0) {
9405             return ret;
9406         }
9407     } else if (arg4 != 0) {
9408         return -TARGET_EINVAL;
9409     }
9410 
9411     /* no pairs */
9412     if (arg2 == 0) {
9413         return 0;
9414     }
9415 
9416     host_pairs = lock_user(VERIFY_WRITE, arg1,
9417                            sizeof(*host_pairs) * (size_t)arg2, 0);
9418     if (host_pairs == NULL) {
9419         return -TARGET_EFAULT;
9420     }
9421     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9422     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9423     return 0;
9424 }
9425 #endif /* TARGET_NR_riscv_hwprobe */
9426 
9427 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9428 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9429 #endif
9430 
9431 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9432 #define __NR_sys_open_tree __NR_open_tree
9433 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9434           unsigned int, __flags)
9435 #endif
9436 
9437 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9438 #define __NR_sys_move_mount __NR_move_mount
9439 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9440            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9441 #endif
9442 
9443 /* This is an internal helper for do_syscall so that it is easier
9444  * to have a single return point, allowing actions such as logging
9445  * of syscall results to be performed in one place.
9446  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9447  */
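/*
 * Illustrative sketch of what the single return point buys us; the real
 * do_syscall() wrapper appears later in this file, and the hook names
 * below are placeholders rather than actual QEMU APIs:
 *
 *     abi_long do_syscall(CPUArchState *env, int num, abi_long a1, ...)
 *     {
 *         abi_long ret;
 *         record_start(num, a1);                 // hypothetical pre-syscall hook
 *         ret = do_syscall1(env, num, a1, ...);  // single call site
 *         record_return(num, ret);               // sees every result, incl. errors
 *         return ret;
 *     }
 */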
9448 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9449                             abi_long arg2, abi_long arg3, abi_long arg4,
9450                             abi_long arg5, abi_long arg6, abi_long arg7,
9451                             abi_long arg8)
9452 {
9453     CPUState *cpu = env_cpu(cpu_env);
9454     abi_long ret;
9455 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9456     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9457     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9458     || defined(TARGET_NR_statx)
9459     struct stat st;
9460 #endif
9461 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9462     || defined(TARGET_NR_fstatfs)
9463     struct statfs stfs;
9464 #endif
9465     void *p;
9466 
9467     switch(num) {
9468     case TARGET_NR_exit:
9469         /* In old applications this may be used to implement _exit(2).
9470            However, in threaded applications it is used for thread termination,
9471            and _exit_group is used for application termination.
9472            Do thread termination if we have more than one thread.  */
9473 
9474         if (block_signals()) {
9475             return -QEMU_ERESTARTSYS;
9476         }
9477 
9478         pthread_mutex_lock(&clone_lock);
9479 
9480         if (CPU_NEXT(first_cpu)) {
9481             TaskState *ts = get_task_state(cpu);
9482 
9483             if (ts->child_tidptr) {
9484                 put_user_u32(0, ts->child_tidptr);
9485                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9486                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9487             }
9488 
9489 #ifdef TARGET_AARCH64
9490             if (ts->gcs_base) {
9491                 target_munmap(ts->gcs_base, ts->gcs_size);
9492             }
9493 #endif
9494 
9495             object_unparent(OBJECT(cpu));
9496             object_unref(OBJECT(cpu));
9497             /*
9498              * At this point the CPU should be unrealized and removed
9499              * from cpu lists. We can clean-up the rest of the thread
9500              * data without the lock held.
9501              */
9502 
9503             pthread_mutex_unlock(&clone_lock);
9504 
9505             thread_cpu = NULL;
9506             g_free(ts);
9507             rcu_unregister_thread();
9508             pthread_exit(NULL);
9509         }
9510 
9511         pthread_mutex_unlock(&clone_lock);
9512         preexit_cleanup(cpu_env, arg1);
9513         _exit(arg1);
9514         return 0; /* avoid warning */
9515     case TARGET_NR_read:
9516         if (arg2 == 0 && arg3 == 0) {
9517             return get_errno(safe_read(arg1, 0, 0));
9518         } else {
9519             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9520                 return -TARGET_EFAULT;
9521             ret = get_errno(safe_read(arg1, p, arg3));
9522             if (ret >= 0 &&
9523                 fd_trans_host_to_target_data(arg1)) {
9524                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9525             }
9526             unlock_user(p, arg2, ret);
9527         }
9528         return ret;
9529     case TARGET_NR_write:
9530         if (arg2 == 0 && arg3 == 0) {
9531             return get_errno(safe_write(arg1, 0, 0));
9532         }
9533         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9534             return -TARGET_EFAULT;
9535         if (fd_trans_target_to_host_data(arg1)) {
9536             void *copy = g_malloc(arg3);
9537             memcpy(copy, p, arg3);
9538             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9539             if (ret >= 0) {
9540                 ret = get_errno(safe_write(arg1, copy, ret));
9541             }
9542             g_free(copy);
9543         } else {
9544             ret = get_errno(safe_write(arg1, p, arg3));
9545         }
9546         unlock_user(p, arg2, 0);
9547         return ret;
9548 
9549 #ifdef TARGET_NR_open
9550     case TARGET_NR_open:
9551         if (!(p = lock_user_string(arg1)))
9552             return -TARGET_EFAULT;
9553         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9554                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9555                                   arg3, true));
9556         fd_trans_unregister(ret);
9557         unlock_user(p, arg1, 0);
9558         return ret;
9559 #endif
9560     case TARGET_NR_openat:
9561         if (!(p = lock_user_string(arg2)))
9562             return -TARGET_EFAULT;
9563         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9564                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9565                                   arg4, true));
9566         fd_trans_unregister(ret);
9567         unlock_user(p, arg2, 0);
9568         return ret;
9569     case TARGET_NR_openat2:
9570         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9571         return ret;
9572 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9573     case TARGET_NR_name_to_handle_at:
9574         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9575         return ret;
9576 #endif
9577 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9578     case TARGET_NR_open_by_handle_at:
9579         ret = do_open_by_handle_at(arg1, arg2, arg3);
9580         fd_trans_unregister(ret);
9581         return ret;
9582 #endif
9583 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9584     case TARGET_NR_pidfd_open:
9585         return get_errno(pidfd_open(arg1, arg2));
9586 #endif
9587 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9588     case TARGET_NR_pidfd_send_signal:
9589         {
9590             siginfo_t uinfo, *puinfo;
9591 
9592             if (arg3) {
9593                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9594                 if (!p) {
9595                     return -TARGET_EFAULT;
9596                 }
9597                 target_to_host_siginfo(&uinfo, p);
9598                 unlock_user(p, arg3, 0);
9599                 puinfo = &uinfo;
9600             } else {
9601                 puinfo = NULL;
9602             }
9603             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9604                                               puinfo, arg4));
9605         }
9606         return ret;
9607 #endif
9608 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9609     case TARGET_NR_pidfd_getfd:
9610         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9611 #endif
9612     case TARGET_NR_close:
9613         fd_trans_unregister(arg1);
9614         return get_errno(close(arg1));
9615 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9616     case TARGET_NR_close_range:
9617         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9618         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9619             abi_long fd, maxfd;
9620             maxfd = MIN(arg2, target_fd_max);
9621             for (fd = arg1; fd < maxfd; fd++) {
9622                 fd_trans_unregister(fd);
9623             }
9624         }
9625         return ret;
9626 #endif
9627 
9628     case TARGET_NR_brk:
9629         return do_brk(arg1);
9630 #ifdef TARGET_NR_fork
9631     case TARGET_NR_fork:
9632         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9633 #endif
9634 #ifdef TARGET_NR_waitpid
9635     case TARGET_NR_waitpid:
9636         {
9637             int status;
9638             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9639             if (!is_error(ret) && arg2 && ret
9640                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9641                 return -TARGET_EFAULT;
9642         }
9643         return ret;
9644 #endif
9645 #ifdef TARGET_NR_waitid
9646     case TARGET_NR_waitid:
9647         {
9648             struct rusage ru;
9649             siginfo_t info;
9650 
9651             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9652                                         arg4, (arg5 ? &ru : NULL)));
9653             if (!is_error(ret)) {
9654                 if (arg3) {
9655                     p = lock_user(VERIFY_WRITE, arg3,
9656                                   sizeof(target_siginfo_t), 0);
9657                     if (!p) {
9658                         return -TARGET_EFAULT;
9659                     }
9660                     host_to_target_siginfo(p, &info);
9661                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9662                 }
9663                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9664                     return -TARGET_EFAULT;
9665                 }
9666             }
9667         }
9668         return ret;
9669 #endif
9670 #ifdef TARGET_NR_creat /* not on alpha */
9671     case TARGET_NR_creat:
9672         if (!(p = lock_user_string(arg1)))
9673             return -TARGET_EFAULT;
9674         ret = get_errno(creat(p, arg2));
9675         fd_trans_unregister(ret);
9676         unlock_user(p, arg1, 0);
9677         return ret;
9678 #endif
9679 #ifdef TARGET_NR_link
9680     case TARGET_NR_link:
9681         {
9682             void * p2;
9683             p = lock_user_string(arg1);
9684             p2 = lock_user_string(arg2);
9685             if (!p || !p2)
9686                 ret = -TARGET_EFAULT;
9687             else
9688                 ret = get_errno(link(p, p2));
9689             unlock_user(p2, arg2, 0);
9690             unlock_user(p, arg1, 0);
9691         }
9692         return ret;
9693 #endif
9694 #if defined(TARGET_NR_linkat)
9695     case TARGET_NR_linkat:
9696         {
9697             void * p2 = NULL;
9698             if (!arg2 || !arg4)
9699                 return -TARGET_EFAULT;
9700             p  = lock_user_string(arg2);
9701             p2 = lock_user_string(arg4);
9702             if (!p || !p2)
9703                 ret = -TARGET_EFAULT;
9704             else
9705                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9706             unlock_user(p, arg2, 0);
9707             unlock_user(p2, arg4, 0);
9708         }
9709         return ret;
9710 #endif
9711 #ifdef TARGET_NR_unlink
9712     case TARGET_NR_unlink:
9713         if (!(p = lock_user_string(arg1)))
9714             return -TARGET_EFAULT;
9715         ret = get_errno(unlink(p));
9716         unlock_user(p, arg1, 0);
9717         return ret;
9718 #endif
9719 #if defined(TARGET_NR_unlinkat)
9720     case TARGET_NR_unlinkat:
9721         if (!(p = lock_user_string(arg2)))
9722             return -TARGET_EFAULT;
9723         ret = get_errno(unlinkat(arg1, p, arg3));
9724         unlock_user(p, arg2, 0);
9725         return ret;
9726 #endif
9727     case TARGET_NR_execveat:
9728         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9729     case TARGET_NR_execve:
9730         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9731     case TARGET_NR_chdir:
9732         if (!(p = lock_user_string(arg1)))
9733             return -TARGET_EFAULT;
9734         ret = get_errno(chdir(p));
9735         unlock_user(p, arg1, 0);
9736         return ret;
9737 #ifdef TARGET_NR_time
9738     case TARGET_NR_time:
9739         {
9740             time_t host_time;
9741             ret = get_errno(time(&host_time));
9742             if (!is_error(ret)
9743                 && arg1
9744                 && put_user_sal(host_time, arg1))
9745                 return -TARGET_EFAULT;
9746         }
9747         return ret;
9748 #endif
9749 #ifdef TARGET_NR_mknod
9750     case TARGET_NR_mknod:
9751         if (!(p = lock_user_string(arg1)))
9752             return -TARGET_EFAULT;
9753         ret = get_errno(mknod(p, arg2, arg3));
9754         unlock_user(p, arg1, 0);
9755         return ret;
9756 #endif
9757 #if defined(TARGET_NR_mknodat)
9758     case TARGET_NR_mknodat:
9759         if (!(p = lock_user_string(arg2)))
9760             return -TARGET_EFAULT;
9761         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9762         unlock_user(p, arg2, 0);
9763         return ret;
9764 #endif
9765 #ifdef TARGET_NR_chmod
9766     case TARGET_NR_chmod:
9767         if (!(p = lock_user_string(arg1)))
9768             return -TARGET_EFAULT;
9769         ret = get_errno(chmod(p, arg2));
9770         unlock_user(p, arg1, 0);
9771         return ret;
9772 #endif
9773 #ifdef TARGET_NR_lseek
9774     case TARGET_NR_lseek:
9775         return get_errno(lseek(arg1, arg2, arg3));
9776 #endif
9777 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9778     /* Alpha specific */
9779     case TARGET_NR_getxpid:
9780         cpu_env->ir[IR_A4] = getppid();
9781         return get_errno(getpid());
9782 #endif
9783 #ifdef TARGET_NR_getpid
9784     case TARGET_NR_getpid:
9785         return get_errno(getpid());
9786 #endif
9787     case TARGET_NR_mount:
9788         {
9789             /* need to look at the data field */
9790             void *p2, *p3;
9791 
9792             if (arg1) {
9793                 p = lock_user_string(arg1);
9794                 if (!p) {
9795                     return -TARGET_EFAULT;
9796                 }
9797             } else {
9798                 p = NULL;
9799             }
9800 
9801             p2 = lock_user_string(arg2);
9802             if (!p2) {
9803                 if (arg1) {
9804                     unlock_user(p, arg1, 0);
9805                 }
9806                 return -TARGET_EFAULT;
9807             }
9808 
9809             if (arg3) {
9810                 p3 = lock_user_string(arg3);
9811                 if (!p3) {
9812                     if (arg1) {
9813                         unlock_user(p, arg1, 0);
9814                     }
9815                     unlock_user(p2, arg2, 0);
9816                     return -TARGET_EFAULT;
9817                 }
9818             } else {
9819                 p3 = NULL;
9820             }
9821 
9822             /* FIXME - arg5 should be locked, but it isn't clear how to
9823              * do that since it's not guaranteed to be a NULL-terminated
9824              * string.
9825              */
9826             if (!arg5) {
9827                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9828             } else {
9829                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9830             }
9831             ret = get_errno(ret);
9832 
9833             if (arg1) {
9834                 unlock_user(p, arg1, 0);
9835             }
9836             unlock_user(p2, arg2, 0);
9837             if (arg3) {
9838                 unlock_user(p3, arg3, 0);
9839             }
9840         }
9841         return ret;
9842 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9843 #if defined(TARGET_NR_umount)
9844     case TARGET_NR_umount:
9845 #endif
9846 #if defined(TARGET_NR_oldumount)
9847     case TARGET_NR_oldumount:
9848 #endif
9849         if (!(p = lock_user_string(arg1)))
9850             return -TARGET_EFAULT;
9851         ret = get_errno(umount(p));
9852         unlock_user(p, arg1, 0);
9853         return ret;
9854 #endif
9855 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9856     case TARGET_NR_move_mount:
9857         {
9858             void *p2, *p4;
9859 
9860             if (!arg2 || !arg4) {
9861                 return -TARGET_EFAULT;
9862             }
9863 
9864             p2 = lock_user_string(arg2);
9865             if (!p2) {
9866                 return -TARGET_EFAULT;
9867             }
9868 
9869             p4 = lock_user_string(arg4);
9870             if (!p4) {
9871                 unlock_user(p2, arg2, 0);
9872                 return -TARGET_EFAULT;
9873             }
9874             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9875 
9876             unlock_user(p2, arg2, 0);
9877             unlock_user(p4, arg4, 0);
9878 
9879             return ret;
9880         }
9881 #endif
9882 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9883     case TARGET_NR_open_tree:
9884         {
9885             void *p2;
9886             int host_flags;
9887 
9888             if (!arg2) {
9889                 return -TARGET_EFAULT;
9890             }
9891 
9892             p2 = lock_user_string(arg2);
9893             if (!p2) {
9894                 return -TARGET_EFAULT;
9895             }
9896 
9897             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9898             if (arg3 & TARGET_O_CLOEXEC) {
9899                 host_flags |= O_CLOEXEC;
9900             }
9901 
9902             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9903 
9904             unlock_user(p2, arg2, 0);
9905 
9906             return ret;
9907         }
9908 #endif
9909 #ifdef TARGET_NR_stime /* not on alpha */
9910     case TARGET_NR_stime:
9911         {
9912             struct timespec ts;
9913             ts.tv_nsec = 0;
9914             if (get_user_sal(ts.tv_sec, arg1)) {
9915                 return -TARGET_EFAULT;
9916             }
9917             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9918         }
9919 #endif
9920 #ifdef TARGET_NR_alarm /* not on alpha */
9921     case TARGET_NR_alarm:
9922         return alarm(arg1);
9923 #endif
9924 #ifdef TARGET_NR_pause /* not on alpha */
9925     case TARGET_NR_pause:
9926         if (!block_signals()) {
9927             sigsuspend(&get_task_state(cpu)->signal_mask);
9928         }
9929         return -TARGET_EINTR;
9930 #endif
9931 #ifdef TARGET_NR_utime
9932     case TARGET_NR_utime:
9933         {
9934             struct utimbuf tbuf, *host_tbuf;
9935             struct target_utimbuf *target_tbuf;
9936             if (arg2) {
9937                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9938                     return -TARGET_EFAULT;
9939                 tbuf.actime = tswapal(target_tbuf->actime);
9940                 tbuf.modtime = tswapal(target_tbuf->modtime);
9941                 unlock_user_struct(target_tbuf, arg2, 0);
9942                 host_tbuf = &tbuf;
9943             } else {
9944                 host_tbuf = NULL;
9945             }
9946             if (!(p = lock_user_string(arg1)))
9947                 return -TARGET_EFAULT;
9948             ret = get_errno(utime(p, host_tbuf));
9949             unlock_user(p, arg1, 0);
9950         }
9951         return ret;
9952 #endif
9953 #ifdef TARGET_NR_utimes
9954     case TARGET_NR_utimes:
9955         {
9956             struct timeval *tvp, tv[2];
9957             if (arg2) {
9958                 if (copy_from_user_timeval(&tv[0], arg2)
9959                     || copy_from_user_timeval(&tv[1],
9960                                               arg2 + sizeof(struct target_timeval)))
9961                     return -TARGET_EFAULT;
9962                 tvp = tv;
9963             } else {
9964                 tvp = NULL;
9965             }
9966             if (!(p = lock_user_string(arg1)))
9967                 return -TARGET_EFAULT;
9968             ret = get_errno(utimes(p, tvp));
9969             unlock_user(p, arg1, 0);
9970         }
9971         return ret;
9972 #endif
9973 #if defined(TARGET_NR_futimesat)
9974     case TARGET_NR_futimesat:
9975         {
9976             struct timeval *tvp, tv[2];
9977             if (arg3) {
9978                 if (copy_from_user_timeval(&tv[0], arg3)
9979                     || copy_from_user_timeval(&tv[1],
9980                                               arg3 + sizeof(struct target_timeval)))
9981                     return -TARGET_EFAULT;
9982                 tvp = tv;
9983             } else {
9984                 tvp = NULL;
9985             }
9986             if (!(p = lock_user_string(arg2))) {
9987                 return -TARGET_EFAULT;
9988             }
9989             ret = get_errno(futimesat(arg1, path(p), tvp));
9990             unlock_user(p, arg2, 0);
9991         }
9992         return ret;
9993 #endif
9994 #ifdef TARGET_NR_access
9995     case TARGET_NR_access:
9996         if (!(p = lock_user_string(arg1))) {
9997             return -TARGET_EFAULT;
9998         }
9999         ret = get_errno(access(path(p), arg2));
10000         unlock_user(p, arg1, 0);
10001         return ret;
10002 #endif
10003 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
10004     case TARGET_NR_faccessat:
10005         if (!(p = lock_user_string(arg2))) {
10006             return -TARGET_EFAULT;
10007         }
10008         ret = get_errno(faccessat(arg1, p, arg3, 0));
10009         unlock_user(p, arg2, 0);
10010         return ret;
10011 #endif
10012 #if defined(TARGET_NR_faccessat2)
10013     case TARGET_NR_faccessat2:
10014         if (!(p = lock_user_string(arg2))) {
10015             return -TARGET_EFAULT;
10016         }
10017         ret = get_errno(faccessat(arg1, p, arg3, arg4));
10018         unlock_user(p, arg2, 0);
10019         return ret;
10020 #endif
10021 #ifdef TARGET_NR_nice /* not on alpha */
10022     case TARGET_NR_nice:
10023         return get_errno(nice(arg1));
10024 #endif
10025     case TARGET_NR_sync:
10026         sync();
10027         return 0;
10028 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
10029     case TARGET_NR_syncfs:
10030         return get_errno(syncfs(arg1));
10031 #endif
10032     case TARGET_NR_kill:
10033         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
10034 #ifdef TARGET_NR_rename
10035     case TARGET_NR_rename:
10036         {
10037             void *p2;
10038             p = lock_user_string(arg1);
10039             p2 = lock_user_string(arg2);
10040             if (!p || !p2)
10041                 ret = -TARGET_EFAULT;
10042             else
10043                 ret = get_errno(rename(p, p2));
10044             unlock_user(p2, arg2, 0);
10045             unlock_user(p, arg1, 0);
10046         }
10047         return ret;
10048 #endif
10049 #if defined(TARGET_NR_renameat)
10050     case TARGET_NR_renameat:
10051         {
10052             void *p2;
10053             p  = lock_user_string(arg2);
10054             p2 = lock_user_string(arg4);
10055             if (!p || !p2)
10056                 ret = -TARGET_EFAULT;
10057             else
10058                 ret = get_errno(renameat(arg1, p, arg3, p2));
10059             unlock_user(p2, arg4, 0);
10060             unlock_user(p, arg2, 0);
10061         }
10062         return ret;
10063 #endif
10064 #if defined(TARGET_NR_renameat2)
10065     case TARGET_NR_renameat2:
10066         {
10067             void *p2;
10068             p  = lock_user_string(arg2);
10069             p2 = lock_user_string(arg4);
10070             if (!p || !p2) {
10071                 ret = -TARGET_EFAULT;
10072             } else {
10073                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
10074             }
10075             unlock_user(p2, arg4, 0);
10076             unlock_user(p, arg2, 0);
10077         }
10078         return ret;
10079 #endif
10080 #ifdef TARGET_NR_mkdir
10081     case TARGET_NR_mkdir:
10082         if (!(p = lock_user_string(arg1)))
10083             return -TARGET_EFAULT;
10084         ret = get_errno(mkdir(p, arg2));
10085         unlock_user(p, arg1, 0);
10086         return ret;
10087 #endif
10088 #if defined(TARGET_NR_mkdirat)
10089     case TARGET_NR_mkdirat:
10090         if (!(p = lock_user_string(arg2)))
10091             return -TARGET_EFAULT;
10092         ret = get_errno(mkdirat(arg1, p, arg3));
10093         unlock_user(p, arg2, 0);
10094         return ret;
10095 #endif
10096 #ifdef TARGET_NR_rmdir
10097     case TARGET_NR_rmdir:
10098         if (!(p = lock_user_string(arg1)))
10099             return -TARGET_EFAULT;
10100         ret = get_errno(rmdir(p));
10101         unlock_user(p, arg1, 0);
10102         return ret;
10103 #endif
10104     case TARGET_NR_dup:
10105         ret = get_errno(dup(arg1));
10106         if (ret >= 0) {
10107             fd_trans_dup(arg1, ret);
10108         }
10109         return ret;
10110 #ifdef TARGET_NR_pipe
10111     case TARGET_NR_pipe:
10112         return do_pipe(cpu_env, arg1, 0, 0);
10113 #endif
10114 #ifdef TARGET_NR_pipe2
10115     case TARGET_NR_pipe2:
10116         return do_pipe(cpu_env, arg1,
10117                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
10118 #endif
10119     case TARGET_NR_times:
10120         {
10121             struct target_tms *tmsp;
10122             struct tms tms;
10123             ret = get_errno(times(&tms));
10124             if (arg1) {
10125                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
10126                 if (!tmsp)
10127                     return -TARGET_EFAULT;
10128                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
10129                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
10130                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
10131                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
10132             }
10133             if (!is_error(ret))
10134                 ret = host_to_target_clock_t(ret);
10135         }
10136         return ret;
10137     case TARGET_NR_acct:
10138         if (arg1 == 0) {
10139             ret = get_errno(acct(NULL));
10140         } else {
10141             if (!(p = lock_user_string(arg1))) {
10142                 return -TARGET_EFAULT;
10143             }
10144             ret = get_errno(acct(path(p)));
10145             unlock_user(p, arg1, 0);
10146         }
10147         return ret;
10148 #ifdef TARGET_NR_umount2
10149     case TARGET_NR_umount2:
10150         if (!(p = lock_user_string(arg1)))
10151             return -TARGET_EFAULT;
10152         ret = get_errno(umount2(p, arg2));
10153         unlock_user(p, arg1, 0);
10154         return ret;
10155 #endif
10156     case TARGET_NR_ioctl:
10157         return do_ioctl(arg1, arg2, arg3);
10158 #ifdef TARGET_NR_fcntl
10159     case TARGET_NR_fcntl:
10160         return do_fcntl(arg1, arg2, arg3);
10161 #endif
10162     case TARGET_NR_setpgid:
10163         return get_errno(setpgid(arg1, arg2));
10164     case TARGET_NR_umask:
10165         return get_errno(umask(arg1));
10166     case TARGET_NR_chroot:
10167         if (!(p = lock_user_string(arg1)))
10168             return -TARGET_EFAULT;
10169         ret = get_errno(chroot(p));
10170         unlock_user(p, arg1, 0);
10171         return ret;
10172 #ifdef TARGET_NR_dup2
10173     case TARGET_NR_dup2:
10174         ret = get_errno(dup2(arg1, arg2));
10175         if (ret >= 0) {
10176             fd_trans_dup(arg1, arg2);
10177         }
10178         return ret;
10179 #endif
10180 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
10181     case TARGET_NR_dup3:
10182     {
10183         int host_flags;
10184 
10185         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
10186             return -TARGET_EINVAL;
10187         }
10188         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
10189         ret = get_errno(dup3(arg1, arg2, host_flags));
10190         if (ret >= 0) {
10191             fd_trans_dup(arg1, arg2);
10192         }
10193         return ret;
10194     }
10195 #endif
10196 #ifdef TARGET_NR_getppid /* not on alpha */
10197     case TARGET_NR_getppid:
10198         return get_errno(getppid());
10199 #endif
10200 #ifdef TARGET_NR_getpgrp
10201     case TARGET_NR_getpgrp:
10202         return get_errno(getpgrp());
10203 #endif
10204     case TARGET_NR_setsid:
10205         return get_errno(setsid());
10206 #ifdef TARGET_NR_sigaction
10207     case TARGET_NR_sigaction:
10208         {
10209 #if defined(TARGET_MIPS)
10210             struct target_sigaction act, oact, *pact, *old_act;
10211 
10212             if (arg2) {
10213                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10214                     return -TARGET_EFAULT;
10215                 act._sa_handler = old_act->_sa_handler;
10216                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
10217                 act.sa_flags = old_act->sa_flags;
10218                 unlock_user_struct(old_act, arg2, 0);
10219                 pact = &act;
10220             } else {
10221                 pact = NULL;
10222             }
10223 
10224             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10225 
10226             if (!is_error(ret) && arg3) {
10227                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10228                     return -TARGET_EFAULT;
10229                 old_act->_sa_handler = oact._sa_handler;
10230                 old_act->sa_flags = oact.sa_flags;
10231                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
10232                 old_act->sa_mask.sig[1] = 0;
10233                 old_act->sa_mask.sig[2] = 0;
10234                 old_act->sa_mask.sig[3] = 0;
10235                 unlock_user_struct(old_act, arg3, 1);
10236             }
10237 #else
10238             struct target_old_sigaction *old_act;
10239             struct target_sigaction act, oact, *pact;
10240             if (arg2) {
10241                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
10242                     return -TARGET_EFAULT;
10243                 act._sa_handler = old_act->_sa_handler;
10244                 target_siginitset(&act.sa_mask, old_act->sa_mask);
10245                 act.sa_flags = old_act->sa_flags;
10246 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10247                 act.sa_restorer = old_act->sa_restorer;
10248 #endif
10249                 unlock_user_struct(old_act, arg2, 0);
10250                 pact = &act;
10251             } else {
10252                 pact = NULL;
10253             }
10254             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
10255             if (!is_error(ret) && arg3) {
10256                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
10257                     return -TARGET_EFAULT;
10258                 old_act->_sa_handler = oact._sa_handler;
10259                 old_act->sa_mask = oact.sa_mask.sig[0];
10260                 old_act->sa_flags = oact.sa_flags;
10261 #ifdef TARGET_ARCH_HAS_SA_RESTORER
10262                 old_act->sa_restorer = oact.sa_restorer;
10263 #endif
10264                 unlock_user_struct(old_act, arg3, 1);
10265             }
10266 #endif
10267         }
10268         return ret;
10269 #endif
10270     case TARGET_NR_rt_sigaction:
10271         {
10272             /*
10273              * For Alpha and SPARC this is a 5 argument syscall, with
10274              * a 'restorer' parameter which must be copied into the
10275              * sa_restorer field of the sigaction struct.
10276              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
10277              * and arg5 is the sigsetsize.
10278              */
10279 #if defined(TARGET_ALPHA)
10280             target_ulong sigsetsize = arg4;
10281             target_ulong restorer = arg5;
10282 #elif defined(TARGET_SPARC)
10283             target_ulong restorer = arg4;
10284             target_ulong sigsetsize = arg5;
10285 #else
10286             target_ulong sigsetsize = arg4;
10287             target_ulong restorer = 0;
10288 #endif
10289             struct target_sigaction *act = NULL;
10290             struct target_sigaction *oact = NULL;
10291 
10292             if (sigsetsize != sizeof(target_sigset_t)) {
10293                 return -TARGET_EINVAL;
10294             }
10295             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10296                 return -TARGET_EFAULT;
10297             }
10298             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10299                 ret = -TARGET_EFAULT;
10300             } else {
10301                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10302                 if (oact) {
10303                     unlock_user_struct(oact, arg3, 1);
10304                 }
10305             }
10306             if (act) {
10307                 unlock_user_struct(act, arg2, 0);
10308             }
10309         }
10310         return ret;
10311 #ifdef TARGET_NR_sgetmask /* not on alpha */
10312     case TARGET_NR_sgetmask:
10313         {
10314             sigset_t cur_set;
10315             abi_ulong target_set;
10316             ret = do_sigprocmask(0, NULL, &cur_set);
10317             if (!ret) {
10318                 host_to_target_old_sigset(&target_set, &cur_set);
10319                 ret = target_set;
10320             }
10321         }
10322         return ret;
10323 #endif
10324 #ifdef TARGET_NR_ssetmask /* not on alpha */
10325     case TARGET_NR_ssetmask:
10326         {
10327             sigset_t set, oset;
10328             abi_ulong target_set = arg1;
10329             target_to_host_old_sigset(&set, &target_set);
10330             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10331             if (!ret) {
10332                 host_to_target_old_sigset(&target_set, &oset);
10333                 ret = target_set;
10334             }
10335         }
10336         return ret;
10337 #endif
10338 #ifdef TARGET_NR_sigprocmask
10339     case TARGET_NR_sigprocmask:
10340         {
10341 #if defined(TARGET_ALPHA)
10342             sigset_t set, oldset;
10343             abi_ulong mask;
10344             int how;
10345 
10346             switch (arg1) {
10347             case TARGET_SIG_BLOCK:
10348                 how = SIG_BLOCK;
10349                 break;
10350             case TARGET_SIG_UNBLOCK:
10351                 how = SIG_UNBLOCK;
10352                 break;
10353             case TARGET_SIG_SETMASK:
10354                 how = SIG_SETMASK;
10355                 break;
10356             default:
10357                 return -TARGET_EINVAL;
10358             }
10359             mask = arg2;
10360             target_to_host_old_sigset(&set, &mask);
10361 
10362             ret = do_sigprocmask(how, &set, &oldset);
10363             if (!is_error(ret)) {
10364                 host_to_target_old_sigset(&mask, &oldset);
10365                 ret = mask;
10366                 cpu_env->ir[IR_V0] = 0; /* force no error */
10367             }
10368 #else
10369             sigset_t set, oldset, *set_ptr;
10370             int how;
10371 
10372             if (arg2) {
10373                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10374                 if (!p) {
10375                     return -TARGET_EFAULT;
10376                 }
10377                 target_to_host_old_sigset(&set, p);
10378                 unlock_user(p, arg2, 0);
10379                 set_ptr = &set;
10380                 switch (arg1) {
10381                 case TARGET_SIG_BLOCK:
10382                     how = SIG_BLOCK;
10383                     break;
10384                 case TARGET_SIG_UNBLOCK:
10385                     how = SIG_UNBLOCK;
10386                     break;
10387                 case TARGET_SIG_SETMASK:
10388                     how = SIG_SETMASK;
10389                     break;
10390                 default:
10391                     return -TARGET_EINVAL;
10392                 }
10393             } else {
10394                 how = 0;
10395                 set_ptr = NULL;
10396             }
10397             ret = do_sigprocmask(how, set_ptr, &oldset);
10398             if (!is_error(ret) && arg3) {
10399                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10400                     return -TARGET_EFAULT;
10401                 host_to_target_old_sigset(p, &oldset);
10402                 unlock_user(p, arg3, sizeof(target_sigset_t));
10403             }
10404 #endif
10405         }
10406         return ret;
10407 #endif
10408     case TARGET_NR_rt_sigprocmask:
10409         {
10410             int how = arg1;
10411             sigset_t set, oldset, *set_ptr;
10412 
10413             if (arg4 != sizeof(target_sigset_t)) {
10414                 return -TARGET_EINVAL;
10415             }
10416 
10417             if (arg2) {
10418                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10419                 if (!p) {
10420                     return -TARGET_EFAULT;
10421                 }
10422                 target_to_host_sigset(&set, p);
10423                 unlock_user(p, arg2, 0);
10424                 set_ptr = &set;
10425                 switch(how) {
10426                 case TARGET_SIG_BLOCK:
10427                     how = SIG_BLOCK;
10428                     break;
10429                 case TARGET_SIG_UNBLOCK:
10430                     how = SIG_UNBLOCK;
10431                     break;
10432                 case TARGET_SIG_SETMASK:
10433                     how = SIG_SETMASK;
10434                     break;
10435                 default:
10436                     return -TARGET_EINVAL;
10437                 }
10438             } else {
10439                 how = 0;
10440                 set_ptr = NULL;
10441             }
10442             ret = do_sigprocmask(how, set_ptr, &oldset);
10443             if (!is_error(ret) && arg3) {
10444                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10445                     return -TARGET_EFAULT;
10446                 host_to_target_sigset(p, &oldset);
10447                 unlock_user(p, arg3, sizeof(target_sigset_t));
10448             }
10449         }
10450         return ret;
10451 #ifdef TARGET_NR_sigpending
10452     case TARGET_NR_sigpending:
10453         {
10454             sigset_t set;
10455             ret = get_errno(sigpending(&set));
10456             if (!is_error(ret)) {
10457                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10458                     return -TARGET_EFAULT;
10459                 host_to_target_old_sigset(p, &set);
10460                 unlock_user(p, arg1, sizeof(target_sigset_t));
10461             }
10462         }
10463         return ret;
10464 #endif
10465     case TARGET_NR_rt_sigpending:
10466         {
10467             sigset_t set;
10468 
10469             /* Yes, this check is >, not != like most. We follow the kernel's
10470              * logic: it is done this way because the kernel implements
10471              * NR_sigpending through the same code path, and in that case
10472              * the old_sigset_t is smaller in size.
10473              */
10474             if (arg2 > sizeof(target_sigset_t)) {
10475                 return -TARGET_EINVAL;
10476             }
10477 
10478             ret = get_errno(sigpending(&set));
10479             if (!is_error(ret)) {
10480                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10481                     return -TARGET_EFAULT;
10482                 host_to_target_sigset(p, &set);
10483                 unlock_user(p, arg1, sizeof(target_sigset_t));
10484             }
10485         }
10486         return ret;
10487 #ifdef TARGET_NR_sigsuspend
10488     case TARGET_NR_sigsuspend:
10489         {
10490             sigset_t *set;
10491 
10492 #if defined(TARGET_ALPHA)
10493             TaskState *ts = get_task_state(cpu);
10494             /* target_to_host_old_sigset will bswap back */
10495             abi_ulong mask = tswapal(arg1);
10496             set = &ts->sigsuspend_mask;
10497             target_to_host_old_sigset(set, &mask);
10498 #else
10499             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10500             if (ret != 0) {
10501                 return ret;
10502             }
10503 #endif
10504             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10505             finish_sigsuspend_mask(ret);
10506         }
10507         return ret;
10508 #endif
10509     case TARGET_NR_rt_sigsuspend:
10510         {
10511             sigset_t *set;
10512 
10513             ret = process_sigsuspend_mask(&set, arg1, arg2);
10514             if (ret != 0) {
10515                 return ret;
10516             }
10517             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10518             finish_sigsuspend_mask(ret);
10519         }
10520         return ret;
10521 #ifdef TARGET_NR_rt_sigtimedwait
10522     case TARGET_NR_rt_sigtimedwait:
10523         {
10524             sigset_t set;
10525             struct timespec uts, *puts;
10526             siginfo_t uinfo;
10527 
10528             if (arg4 != sizeof(target_sigset_t)) {
10529                 return -TARGET_EINVAL;
10530             }
10531 
10532             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10533                 return -TARGET_EFAULT;
10534             target_to_host_sigset(&set, p);
10535             unlock_user(p, arg1, 0);
10536             if (arg3) {
10537                 puts = &uts;
10538                 if (target_to_host_timespec(puts, arg3)) {
10539                     return -TARGET_EFAULT;
10540                 }
10541             } else {
10542                 puts = NULL;
10543             }
10544             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10545                                                  SIGSET_T_SIZE));
10546             if (!is_error(ret)) {
10547                 if (arg2) {
10548                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10549                                   0);
10550                     if (!p) {
10551                         return -TARGET_EFAULT;
10552                     }
10553                     host_to_target_siginfo(p, &uinfo);
10554                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10555                 }
10556                 ret = host_to_target_signal(ret);
10557             }
10558         }
10559         return ret;
10560 #endif
10561 #ifdef TARGET_NR_rt_sigtimedwait_time64
10562     case TARGET_NR_rt_sigtimedwait_time64:
10563         {
10564             sigset_t set;
10565             struct timespec uts, *puts;
10566             siginfo_t uinfo;
10567 
10568             if (arg4 != sizeof(target_sigset_t)) {
10569                 return -TARGET_EINVAL;
10570             }
10571 
10572             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10573             if (!p) {
10574                 return -TARGET_EFAULT;
10575             }
10576             target_to_host_sigset(&set, p);
10577             unlock_user(p, arg1, 0);
10578             if (arg3) {
10579                 puts = &uts;
10580                 if (target_to_host_timespec64(puts, arg3)) {
10581                     return -TARGET_EFAULT;
10582                 }
10583             } else {
10584                 puts = NULL;
10585             }
10586             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10587                                                  SIGSET_T_SIZE));
10588             if (!is_error(ret)) {
10589                 if (arg2) {
10590                     p = lock_user(VERIFY_WRITE, arg2,
10591                                   sizeof(target_siginfo_t), 0);
10592                     if (!p) {
10593                         return -TARGET_EFAULT;
10594                     }
10595                     host_to_target_siginfo(p, &uinfo);
10596                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10597                 }
10598                 ret = host_to_target_signal(ret);
10599             }
10600         }
10601         return ret;
10602 #endif
10603     case TARGET_NR_rt_sigqueueinfo:
10604         {
10605             siginfo_t uinfo;
10606 
10607             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10608             if (!p) {
10609                 return -TARGET_EFAULT;
10610             }
10611             target_to_host_siginfo(&uinfo, p);
10612             unlock_user(p, arg3, 0);
10613             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10614         }
10615         return ret;
10616     case TARGET_NR_rt_tgsigqueueinfo:
10617         {
10618             siginfo_t uinfo;
10619 
10620             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10621             if (!p) {
10622                 return -TARGET_EFAULT;
10623             }
10624             target_to_host_siginfo(&uinfo, p);
10625             unlock_user(p, arg4, 0);
10626             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10627         }
10628         return ret;
10629 #ifdef TARGET_NR_sigreturn
10630     case TARGET_NR_sigreturn:
10631         if (block_signals()) {
10632             return -QEMU_ERESTARTSYS;
10633         }
10634         return do_sigreturn(cpu_env);
10635 #endif
10636     case TARGET_NR_rt_sigreturn:
10637         if (block_signals()) {
10638             return -QEMU_ERESTARTSYS;
10639         }
10640         return do_rt_sigreturn(cpu_env);
10641     case TARGET_NR_sethostname:
10642         if (!(p = lock_user_string(arg1)))
10643             return -TARGET_EFAULT;
10644         ret = get_errno(sethostname(p, arg2));
10645         unlock_user(p, arg1, 0);
10646         return ret;
10647 #ifdef TARGET_NR_setrlimit
10648     case TARGET_NR_setrlimit:
10649         {
10650             int resource = target_to_host_resource(arg1);
10651             struct target_rlimit *target_rlim;
10652             struct rlimit rlim;
10653             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10654                 return -TARGET_EFAULT;
10655             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10656             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10657             unlock_user_struct(target_rlim, arg2, 0);
10658             /*
10659              * If we just passed through resource limit settings for memory then
10660              * they would also apply to QEMU's own allocations, and QEMU would
10661              * crash or hang if its own allocations fail. Ideally we would
10662              * track the guest allocations in QEMU and apply the limits ourselves.
10663              * For now, just tell the guest the call succeeded but don't actually
10664              * limit anything.
10665              */
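            /*
             * Illustrative consequence (a sketch, not a statement about any
             * particular guest): a guest that does
             *
             *     struct rlimit rl = { 1 << 20, 1 << 20 };
             *     setrlimit(RLIMIT_AS, &rl);   // reported as success below
             *     getrlimit(RLIMIT_AS, &rl);   // still sees the host's limit
             *
             * gets the "pretend it worked" behaviour, because the host limit
             * is left untouched for AS, DATA and STACK.
             */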
10666             if (resource != RLIMIT_AS &&
10667                 resource != RLIMIT_DATA &&
10668                 resource != RLIMIT_STACK) {
10669                 return get_errno(setrlimit(resource, &rlim));
10670             } else {
10671                 return 0;
10672             }
10673         }
10674 #endif
10675 #ifdef TARGET_NR_getrlimit
10676     case TARGET_NR_getrlimit:
10677         {
10678             int resource = target_to_host_resource(arg1);
10679             struct target_rlimit *target_rlim;
10680             struct rlimit rlim;
10681 
10682             ret = get_errno(getrlimit(resource, &rlim));
10683             if (!is_error(ret)) {
10684                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10685                     return -TARGET_EFAULT;
10686                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10687                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10688                 unlock_user_struct(target_rlim, arg2, 1);
10689             }
10690         }
10691         return ret;
10692 #endif
10693     case TARGET_NR_getrusage:
10694         {
10695             struct rusage rusage;
10696             ret = get_errno(getrusage(arg1, &rusage));
10697             if (!is_error(ret)) {
10698                 ret = host_to_target_rusage(arg2, &rusage);
10699             }
10700         }
10701         return ret;
10702 #if defined(TARGET_NR_gettimeofday)
10703     case TARGET_NR_gettimeofday:
10704         {
10705             struct timeval tv;
10706             struct timezone tz;
10707 
10708             ret = get_errno(gettimeofday(&tv, &tz));
10709             if (!is_error(ret)) {
10710                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10711                     return -TARGET_EFAULT;
10712                 }
10713                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10714                     return -TARGET_EFAULT;
10715                 }
10716             }
10717         }
10718         return ret;
10719 #endif
10720 #if defined(TARGET_NR_settimeofday)
10721     case TARGET_NR_settimeofday:
10722         {
10723             struct timeval tv, *ptv = NULL;
10724             struct timezone tz, *ptz = NULL;
10725 
10726             if (arg1) {
10727                 if (copy_from_user_timeval(&tv, arg1)) {
10728                     return -TARGET_EFAULT;
10729                 }
10730                 ptv = &tv;
10731             }
10732 
10733             if (arg2) {
10734                 if (copy_from_user_timezone(&tz, arg2)) {
10735                     return -TARGET_EFAULT;
10736                 }
10737                 ptz = &tz;
10738             }
10739 
10740             return get_errno(settimeofday(ptv, ptz));
10741         }
10742 #endif
10743 #if defined(TARGET_NR_select)
10744     case TARGET_NR_select:
10745 #if defined(TARGET_WANT_NI_OLD_SELECT)
10746         /* Some architectures used to have old_select here,
10747          * but now return ENOSYS for it.
10748          */
10749         ret = -TARGET_ENOSYS;
10750 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10751         ret = do_old_select(arg1);
10752 #else
10753         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10754 #endif
10755         return ret;
10756 #endif
10757 #ifdef TARGET_NR_pselect6
10758     case TARGET_NR_pselect6:
10759         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10760 #endif
10761 #ifdef TARGET_NR_pselect6_time64
10762     case TARGET_NR_pselect6_time64:
10763         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10764 #endif
10765 #ifdef TARGET_NR_symlink
10766     case TARGET_NR_symlink:
10767         {
10768             void *p2;
10769             p = lock_user_string(arg1);
10770             p2 = lock_user_string(arg2);
10771             if (!p || !p2)
10772                 ret = -TARGET_EFAULT;
10773             else
10774                 ret = get_errno(symlink(p, p2));
10775             unlock_user(p2, arg2, 0);
10776             unlock_user(p, arg1, 0);
10777         }
10778         return ret;
10779 #endif
10780 #if defined(TARGET_NR_symlinkat)
10781     case TARGET_NR_symlinkat:
10782         {
10783             void *p2;
10784             p  = lock_user_string(arg1);
10785             p2 = lock_user_string(arg3);
10786             if (!p || !p2)
10787                 ret = -TARGET_EFAULT;
10788             else
10789                 ret = get_errno(symlinkat(p, arg2, p2));
10790             unlock_user(p2, arg3, 0);
10791             unlock_user(p, arg1, 0);
10792         }
10793         return ret;
10794 #endif
10795 #ifdef TARGET_NR_readlink
10796     case TARGET_NR_readlink:
10797         {
10798             void *p2;
10799             p = lock_user_string(arg1);
10800             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10801             ret = get_errno(do_guest_readlink(p, p2, arg3));
10802             unlock_user(p2, arg2, ret);
10803             unlock_user(p, arg1, 0);
10804         }
10805         return ret;
10806 #endif
10807 #if defined(TARGET_NR_readlinkat)
10808     case TARGET_NR_readlinkat:
10809         {
10810             void *p2;
10811             p  = lock_user_string(arg2);
10812             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10813             if (!p || !p2) {
10814                 ret = -TARGET_EFAULT;
10815             } else if (!arg4) {
10816                 /* Short-circuit a zero-length buffer before the magic exe check below. */
10817                 ret = -TARGET_EINVAL;
10818             } else if (is_proc_myself((const char *)p, "exe")) {
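                /*
                 * A guest readlink of /proc/self/exe must report the path of
                 * the emulated binary (exec_path), not the QEMU host binary.
                 */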
10819                 /*
10820                  * Don't worry about sign mismatch as earlier mapping
10821                  * logic would have thrown a bad address error.
10822                  */
10823                 ret = MIN(strlen(exec_path), arg4);
10824                 /* We cannot NUL terminate the string. */
10825                 memcpy(p2, exec_path, ret);
10826             } else {
10827                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10828             }
10829             unlock_user(p2, arg3, ret);
10830             unlock_user(p, arg2, 0);
10831         }
10832         return ret;
10833 #endif
10834 #ifdef TARGET_NR_swapon
10835     case TARGET_NR_swapon:
10836         if (!(p = lock_user_string(arg1)))
10837             return -TARGET_EFAULT;
10838         ret = get_errno(swapon(p, arg2));
10839         unlock_user(p, arg1, 0);
10840         return ret;
10841 #endif
10842     case TARGET_NR_reboot:
10843         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10844            /* arg4 (the command string) is only used with RESTART2; it
10844               must be ignored for all other commands */
10845            p = lock_user_string(arg4);
10846            if (!p) {
10847                return -TARGET_EFAULT;
10848            }
10849            ret = get_errno(reboot(arg1, arg2, arg3, p));
10850            unlock_user(p, arg4, 0);
10851         } else {
10852            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10853         }
10854         return ret;
10855 #ifdef TARGET_NR_mmap
10856     case TARGET_NR_mmap:
10857 #ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
10858         {
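            /*
             * The old-style mmap takes a single guest pointer (arg1) to a
             * block of six arguments; read and byte-swap them before
             * calling do_mmap().
             */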
10859             abi_ulong *v;
10860             abi_ulong v1, v2, v3, v4, v5, v6;
10861             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10862                 return -TARGET_EFAULT;
10863             v1 = tswapal(v[0]);
10864             v2 = tswapal(v[1]);
10865             v3 = tswapal(v[2]);
10866             v4 = tswapal(v[3]);
10867             v5 = tswapal(v[4]);
10868             v6 = tswapal(v[5]);
10869             unlock_user(v, arg1, 0);
10870             return do_mmap(v1, v2, v3, v4, v5, v6);
10871         }
10872 #else
10873         /* mmap pointers are always untagged */
10874         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10875 #endif
10876 #endif
10877 #ifdef TARGET_NR_mmap2
10878     case TARGET_NR_mmap2:
10879 #ifndef MMAP_SHIFT
10880 #define MMAP_SHIFT 12
10881 #endif
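        /*
         * The mmap2 offset (arg6) is expressed in units of 2^MMAP_SHIFT
         * bytes (4096 unless the target overrides it), so scale it to a
         * byte offset before calling do_mmap().
         */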
10882         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10883                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10884 #endif
10885     case TARGET_NR_munmap:
10886         arg1 = cpu_untagged_addr(cpu, arg1);
10887         return get_errno(target_munmap(arg1, arg2));
10888     case TARGET_NR_mprotect:
10889         arg1 = cpu_untagged_addr(cpu, arg1);
10890         {
10891             TaskState *ts = get_task_state(cpu);
10892             /* Special hack to detect libc making the stack executable.  */
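            /*
             * PROT_GROWSDOWN asks the kernel to apply the protection down
             * to the bottom of the stack; emulate that by extending the
             * range to the stack limit ourselves and dropping the flag.
             */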
10893             if ((arg3 & PROT_GROWSDOWN)
10894                 && arg1 >= ts->info->stack_limit
10895                 && arg1 <= ts->info->start_stack) {
10896                 arg3 &= ~PROT_GROWSDOWN;
10897                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10898                 arg1 = ts->info->stack_limit;
10899             }
10900         }
10901         return get_errno(target_mprotect(arg1, arg2, arg3));
10902 #ifdef TARGET_NR_mremap
10903     case TARGET_NR_mremap:
10904         arg1 = cpu_untagged_addr(cpu, arg1);
10905         /* mremap new_addr (arg5) is always untagged */
10906         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10907 #endif
10908         /* ??? msync/mlock/munlock are broken for softmmu.  */
10909 #ifdef TARGET_NR_msync
10910     case TARGET_NR_msync:
10911         return get_errno(msync(g2h(cpu, arg1), arg2,
10912                                target_to_host_msync_arg(arg3)));
10913 #endif
10914 #ifdef TARGET_NR_mlock
10915     case TARGET_NR_mlock:
10916         return get_errno(mlock(g2h(cpu, arg1), arg2));
10917 #endif
10918 #ifdef TARGET_NR_munlock
10919     case TARGET_NR_munlock:
10920         return get_errno(munlock(g2h(cpu, arg1), arg2));
10921 #endif
10922 #ifdef TARGET_NR_mlockall
10923     case TARGET_NR_mlockall:
10924         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10925 #endif
10926 #ifdef TARGET_NR_munlockall
10927     case TARGET_NR_munlockall:
10928         return get_errno(munlockall());
10929 #endif
10930 #ifdef TARGET_NR_truncate
10931     case TARGET_NR_truncate:
10932         if (!(p = lock_user_string(arg1)))
10933             return -TARGET_EFAULT;
10934         ret = get_errno(truncate(p, arg2));
10935         unlock_user(p, arg1, 0);
10936         return ret;
10937 #endif
10938 #ifdef TARGET_NR_ftruncate
10939     case TARGET_NR_ftruncate:
10940         return get_errno(ftruncate(arg1, arg2));
10941 #endif
10942     case TARGET_NR_fchmod:
10943         return get_errno(fchmod(arg1, arg2));
10944 #if defined(TARGET_NR_fchmodat)
10945     case TARGET_NR_fchmodat:
10946         if (!(p = lock_user_string(arg2)))
10947             return -TARGET_EFAULT;
10948         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10949         unlock_user(p, arg2, 0);
10950         return ret;
10951 #endif
10952 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
10953     case TARGET_NR_fchmodat2:
10954         if (!(p = lock_user_string(arg2))) {
10955             return -TARGET_EFAULT;
10956         }
10957         ret = get_errno(safe_fchmodat2(arg1, p, arg3, arg4));
10958         unlock_user(p, arg2, 0);
10959         return ret;
10960 #endif
10961     case TARGET_NR_getpriority:
10962         /* Note that negative return values are valid for getpriority, so we
10963            must check errno to distinguish a real error.  */
10964         errno = 0;
10965         ret = getpriority(arg1, arg2);
10966         if (ret == -1 && errno != 0) {
10967             return -host_to_target_errno(errno);
10968         }
10969 #ifdef TARGET_ALPHA
10970         /* Return value is the unbiased priority.  Signal no error.  */
10971         cpu_env->ir[IR_V0] = 0;
10972 #else
10973         /* Return value is a biased priority to avoid negative numbers.  */
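        /* The raw syscall reports 20 - nice, e.g. a nice value of -5 is
           returned as 25, so a successful result is never negative.  */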
10974         ret = 20 - ret;
10975 #endif
10976         return ret;
10977     case TARGET_NR_setpriority:
10978         return get_errno(setpriority(arg1, arg2, arg3));
10979 #ifdef TARGET_NR_statfs
10980     case TARGET_NR_statfs:
10981         if (!(p = lock_user_string(arg1))) {
10982             return -TARGET_EFAULT;
10983         }
10984         ret = get_errno(statfs(path(p), &stfs));
10985         unlock_user(p, arg1, 0);
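        /* TARGET_NR_fstatfs jumps here to share the statfs conversion below. */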
10986     convert_statfs:
10987         if (!is_error(ret)) {
10988             struct target_statfs *target_stfs;
10989 
10990             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10991                 return -TARGET_EFAULT;
10992             __put_user(stfs.f_type, &target_stfs->f_type);
10993             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10994             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10995             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10996             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10997             __put_user(stfs.f_files, &target_stfs->f_files);
10998             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10999             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
11000             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
11001             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
11002             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
11003 #ifdef _STATFS_F_FLAGS
11004             __put_user(stfs.f_flags, &target_stfs->f_flags);
11005 #else
11006             __put_user(0, &target_stfs->f_flags);
11007 #endif
11008             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
11009             unlock_user_struct(target_stfs, arg2, 1);
11010         }
11011         return ret;
11012 #endif
11013 #ifdef TARGET_NR_fstatfs
11014     case TARGET_NR_fstatfs:
11015         ret = get_errno(fstatfs(arg1, &stfs));
11016         goto convert_statfs;
11017 #endif
11018 #ifdef TARGET_NR_statfs64
11019     case TARGET_NR_statfs64:
11020         if (!(p = lock_user_string(arg1))) {
11021             return -TARGET_EFAULT;
11022         }
11023         ret = get_errno(statfs(path(p), &stfs));
11024         unlock_user(p, arg1, 0);
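        /* TARGET_NR_fstatfs64 jumps here to share the statfs64 conversion below. */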
11025     convert_statfs64:
11026         if (!is_error(ret)) {
11027             struct target_statfs64 *target_stfs;
11028 
11029             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
11030                 return -TARGET_EFAULT;
11031             __put_user(stfs.f_type, &target_stfs->f_type);
11032             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
11033             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
11034             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
11035             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
11036             __put_user(stfs.f_files, &target_stfs->f_files);
11037             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
11038             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
11039             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
11040             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
11041             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
11042 #ifdef _STATFS_F_FLAGS
11043             __put_user(stfs.f_flags, &target_stfs->f_flags);
11044 #else
11045             __put_user(0, &target_stfs->f_flags);
11046 #endif
11047             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
11048             unlock_user_struct(target_stfs, arg3, 1);
11049         }
11050         return ret;
11051     case TARGET_NR_fstatfs64:
11052         ret = get_errno(fstatfs(arg1, &stfs));
11053         goto convert_statfs64;
11054 #endif
11055 #ifdef TARGET_NR_socketcall
11056     case TARGET_NR_socketcall:
11057         return do_socketcall(arg1, arg2);
11058 #endif
11059 #ifdef TARGET_NR_accept
11060     case TARGET_NR_accept:
11061         return do_accept4(arg1, arg2, arg3, 0);
11062 #endif
11063 #ifdef TARGET_NR_accept4
11064     case TARGET_NR_accept4:
11065         return do_accept4(arg1, arg2, arg3, arg4);
11066 #endif
11067 #ifdef TARGET_NR_bind
11068     case TARGET_NR_bind:
11069         return do_bind(arg1, arg2, arg3);
11070 #endif
11071 #ifdef TARGET_NR_connect
11072     case TARGET_NR_connect:
11073         return do_connect(arg1, arg2, arg3);
11074 #endif
11075 #ifdef TARGET_NR_getpeername
11076     case TARGET_NR_getpeername:
11077         return do_getpeername(arg1, arg2, arg3);
11078 #endif
11079 #ifdef TARGET_NR_getsockname
11080     case TARGET_NR_getsockname:
11081         return do_getsockname(arg1, arg2, arg3);
11082 #endif
11083 #ifdef TARGET_NR_getsockopt
11084     case TARGET_NR_getsockopt:
11085         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
11086 #endif
11087 #ifdef TARGET_NR_listen
11088     case TARGET_NR_listen:
11089         return get_errno(listen(arg1, arg2));
11090 #endif
11091 #ifdef TARGET_NR_recv
11092     case TARGET_NR_recv:
11093         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
11094 #endif
11095 #ifdef TARGET_NR_recvfrom
11096     case TARGET_NR_recvfrom:
11097         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
11098 #endif
11099 #ifdef TARGET_NR_recvmsg
11100     case TARGET_NR_recvmsg:
11101         return do_sendrecvmsg(arg1, arg2, arg3, 0);
11102 #endif
11103 #ifdef TARGET_NR_send
11104     case TARGET_NR_send:
11105         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
11106 #endif
11107 #ifdef TARGET_NR_sendmsg
11108     case TARGET_NR_sendmsg:
11109         return do_sendrecvmsg(arg1, arg2, arg3, 1);
11110 #endif
11111 #ifdef TARGET_NR_sendmmsg
11112     case TARGET_NR_sendmmsg:
11113         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
11114 #endif
11115 #ifdef TARGET_NR_recvmmsg
11116     case TARGET_NR_recvmmsg:
11117         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
11118 #endif
11119 #ifdef TARGET_NR_sendto
11120     case TARGET_NR_sendto:
11121         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
11122 #endif
11123 #ifdef TARGET_NR_shutdown
11124     case TARGET_NR_shutdown:
11125         return get_errno(shutdown(arg1, arg2));
11126 #endif
11127 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
11128     case TARGET_NR_getrandom:
11129         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11130         if (!p) {
11131             return -TARGET_EFAULT;
11132         }
11133         ret = get_errno(getrandom(p, arg2, arg3));
11134         unlock_user(p, arg1, ret);
11135         return ret;
11136 #endif
11137 #ifdef TARGET_NR_socket
11138     case TARGET_NR_socket:
11139         return do_socket(arg1, arg2, arg3);
11140 #endif
11141 #ifdef TARGET_NR_socketpair
11142     case TARGET_NR_socketpair:
11143         return do_socketpair(arg1, arg2, arg3, arg4);
11144 #endif
11145 #ifdef TARGET_NR_setsockopt
11146     case TARGET_NR_setsockopt:
11147         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
11148 #endif
11149 #if defined(TARGET_NR_syslog)
11150     case TARGET_NR_syslog:
11151         {
11152             int len = arg3;
11153 
11154             switch (arg1) {
11155             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
11156             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
11157             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
11158             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
11159             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
11160             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
11161             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
11162             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
11163                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
11164             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
11165             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
11166             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
11167                 {
11168                     if (len < 0) {
11169                         return -TARGET_EINVAL;
11170                     }
11171                     if (len == 0) {
11172                         return 0;
11173                     }
11174                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11175                     if (!p) {
11176                         return -TARGET_EFAULT;
11177                     }
11178                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
11179                     unlock_user(p, arg2, arg3);
11180                 }
11181                 return ret;
11182             default:
11183                 return -TARGET_EINVAL;
11184             }
11185         }
11186         break;
11187 #endif
11188     case TARGET_NR_setitimer:
11189         {
11190             struct itimerval value, ovalue, *pvalue;
11191 
11192             if (arg2) {
11193                 pvalue = &value;
11194                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
11195                     || copy_from_user_timeval(&pvalue->it_value,
11196                                               arg2 + sizeof(struct target_timeval)))
11197                     return -TARGET_EFAULT;
11198             } else {
11199                 pvalue = NULL;
11200             }
11201             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
11202             if (!is_error(ret) && arg3) {
11203                 if (copy_to_user_timeval(arg3,
11204                                          &ovalue.it_interval)
11205                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
11206                                             &ovalue.it_value))
11207                     return -TARGET_EFAULT;
11208             }
11209         }
11210         return ret;
11211     case TARGET_NR_getitimer:
11212         {
11213             struct itimerval value;
11214 
11215             ret = get_errno(getitimer(arg1, &value));
11216             if (!is_error(ret) && arg2) {
11217                 if (copy_to_user_timeval(arg2,
11218                                          &value.it_interval)
11219                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
11220                                             &value.it_value))
11221                     return -TARGET_EFAULT;
11222             }
11223         }
11224         return ret;
11225 #ifdef TARGET_NR_stat
11226     case TARGET_NR_stat:
11227         if (!(p = lock_user_string(arg1))) {
11228             return -TARGET_EFAULT;
11229         }
11230         ret = get_errno(stat(path(p), &st));
11231         unlock_user(p, arg1, 0);
11232         goto do_stat;
11233 #endif
11234 #ifdef TARGET_NR_lstat
11235     case TARGET_NR_lstat:
11236         if (!(p = lock_user_string(arg1))) {
11237             return -TARGET_EFAULT;
11238         }
11239         ret = get_errno(lstat(path(p), &st));
11240         unlock_user(p, arg1, 0);
11241         goto do_stat;
11242 #endif
11243 #ifdef TARGET_NR_fstat
11244     case TARGET_NR_fstat:
11245         {
11246             ret = get_errno(fstat(arg1, &st));
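            /* TARGET_NR_stat and TARGET_NR_lstat jump to do_stat below to
               share the conversion to the target struct stat layout.  */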
11247 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
11248         do_stat:
11249 #endif
11250             if (!is_error(ret)) {
11251                 struct target_stat *target_st;
11252 
11253                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
11254                     return -TARGET_EFAULT;
11255                 memset(target_st, 0, sizeof(*target_st));
11256                 __put_user(st.st_dev, &target_st->st_dev);
11257                 __put_user(st.st_ino, &target_st->st_ino);
11258                 __put_user(st.st_mode, &target_st->st_mode);
11259                 __put_user(st.st_uid, &target_st->st_uid);
11260                 __put_user(st.st_gid, &target_st->st_gid);
11261                 __put_user(st.st_nlink, &target_st->st_nlink);
11262                 __put_user(st.st_rdev, &target_st->st_rdev);
11263                 __put_user(st.st_size, &target_st->st_size);
11264                 __put_user(st.st_blksize, &target_st->st_blksize);
11265                 __put_user(st.st_blocks, &target_st->st_blocks);
11266                 __put_user(st.st_atime, &target_st->target_st_atime);
11267                 __put_user(st.st_mtime, &target_st->target_st_mtime);
11268                 __put_user(st.st_ctime, &target_st->target_st_ctime);
11269 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
11270                 __put_user(st.st_atim.tv_nsec,
11271                            &target_st->target_st_atime_nsec);
11272                 __put_user(st.st_mtim.tv_nsec,
11273                            &target_st->target_st_mtime_nsec);
11274                 __put_user(st.st_ctim.tv_nsec,
11275                            &target_st->target_st_ctime_nsec);
11276 #endif
11277                 unlock_user_struct(target_st, arg2, 1);
11278             }
11279         }
11280         return ret;
11281 #endif
11282     case TARGET_NR_vhangup:
11283         return get_errno(vhangup());
11284 #ifdef TARGET_NR_syscall
11285     case TARGET_NR_syscall:
11286         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11287                           arg6, arg7, arg8, 0);
11288 #endif
11289 #if defined(TARGET_NR_wait4)
11290     case TARGET_NR_wait4:
11291         {
11292             int status;
11293             abi_long status_ptr = arg2;
11294             struct rusage rusage, *rusage_ptr;
11295             abi_ulong target_rusage = arg4;
11296             abi_long rusage_err;
11297             if (target_rusage)
11298                 rusage_ptr = &rusage;
11299             else
11300                 rusage_ptr = NULL;
11301             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11302             if (!is_error(ret)) {
11303                 if (status_ptr && ret) {
11304                     status = host_to_target_waitstatus(status);
11305                     if (put_user_s32(status, status_ptr))
11306                         return -TARGET_EFAULT;
11307                 }
11308                 if (target_rusage) {
11309                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11310                     if (rusage_err) {
11311                         ret = rusage_err;
11312                     }
11313                 }
11314             }
11315         }
11316         return ret;
11317 #endif
11318 #ifdef TARGET_NR_swapoff
11319     case TARGET_NR_swapoff:
11320         if (!(p = lock_user_string(arg1)))
11321             return -TARGET_EFAULT;
11322         ret = get_errno(swapoff(p));
11323         unlock_user(p, arg1, 0);
11324         return ret;
11325 #endif
11326     case TARGET_NR_sysinfo:
11327         {
11328             struct target_sysinfo *target_value;
11329             struct sysinfo value;
11330             ret = get_errno(sysinfo(&value));
11331             if (!is_error(ret) && arg1)
11332             {
11333                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11334                     return -TARGET_EFAULT;
11335                 __put_user(value.uptime, &target_value->uptime);
11336                 __put_user(value.loads[0], &target_value->loads[0]);
11337                 __put_user(value.loads[1], &target_value->loads[1]);
11338                 __put_user(value.loads[2], &target_value->loads[2]);
11339                 __put_user(value.totalram, &target_value->totalram);
11340                 __put_user(value.freeram, &target_value->freeram);
11341                 __put_user(value.sharedram, &target_value->sharedram);
11342                 __put_user(value.bufferram, &target_value->bufferram);
11343                 __put_user(value.totalswap, &target_value->totalswap);
11344                 __put_user(value.freeswap, &target_value->freeswap);
11345                 __put_user(value.procs, &target_value->procs);
11346                 __put_user(value.totalhigh, &target_value->totalhigh);
11347                 __put_user(value.freehigh, &target_value->freehigh);
11348                 __put_user(value.mem_unit, &target_value->mem_unit);
11349                 unlock_user_struct(target_value, arg1, 1);
11350             }
11351         }
11352         return ret;
11353 #ifdef TARGET_NR_ipc
11354     case TARGET_NR_ipc:
11355         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11356 #endif
11357 #ifdef TARGET_NR_semget
11358     case TARGET_NR_semget:
11359         return get_errno(semget(arg1, arg2, arg3));
11360 #endif
11361 #ifdef TARGET_NR_semop
11362     case TARGET_NR_semop:
11363         return do_semtimedop(arg1, arg2, arg3, 0, false);
11364 #endif
11365 #ifdef TARGET_NR_semtimedop
11366     case TARGET_NR_semtimedop:
11367         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11368 #endif
11369 #ifdef TARGET_NR_semtimedop_time64
11370     case TARGET_NR_semtimedop_time64:
11371         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11372 #endif
11373 #ifdef TARGET_NR_semctl
11374     case TARGET_NR_semctl:
11375         return do_semctl(arg1, arg2, arg3, arg4);
11376 #endif
11377 #ifdef TARGET_NR_msgctl
11378     case TARGET_NR_msgctl:
11379         return do_msgctl(arg1, arg2, arg3);
11380 #endif
11381 #ifdef TARGET_NR_msgget
11382     case TARGET_NR_msgget:
11383         return get_errno(msgget(arg1, arg2));
11384 #endif
11385 #ifdef TARGET_NR_msgrcv
11386     case TARGET_NR_msgrcv:
11387         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11388 #endif
11389 #ifdef TARGET_NR_msgsnd
11390     case TARGET_NR_msgsnd:
11391         return do_msgsnd(arg1, arg2, arg3, arg4);
11392 #endif
11393 #ifdef TARGET_NR_shmget
11394     case TARGET_NR_shmget:
11395         return get_errno(shmget(arg1, arg2, arg3));
11396 #endif
11397 #ifdef TARGET_NR_shmctl
11398     case TARGET_NR_shmctl:
11399         return do_shmctl(arg1, arg2, arg3);
11400 #endif
11401 #ifdef TARGET_NR_shmat
11402     case TARGET_NR_shmat:
11403         return target_shmat(cpu_env, arg1, arg2, arg3);
11404 #endif
11405 #ifdef TARGET_NR_shmdt
11406     case TARGET_NR_shmdt:
11407         return target_shmdt(arg1);
11408 #endif
11409     case TARGET_NR_fsync:
11410         return get_errno(fsync(arg1));
11411     case TARGET_NR_clone:
11412         /* Linux manages to have three different orderings for its
11413          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11414          * match the kernel's CONFIG_CLONE_* settings.
11415          * Microblaze is further special in that it uses a sixth
11416          * implicit argument to clone for the TLS pointer.
11417          */
11418 #if defined(TARGET_MICROBLAZE)
11419         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11420 #elif defined(TARGET_CLONE_BACKWARDS)
11421         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11422 #elif defined(TARGET_CLONE_BACKWARDS2)
11423         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11424 #else
11425         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11426 #endif
11427         return ret;
11428 #ifdef __NR_exit_group
11429         /* new thread calls */
11430     case TARGET_NR_exit_group:
11431         preexit_cleanup(cpu_env, arg1);
11432         return get_errno(exit_group(arg1));
11433 #endif
11434     case TARGET_NR_setdomainname:
11435         if (!(p = lock_user_string(arg1)))
11436             return -TARGET_EFAULT;
11437         ret = get_errno(setdomainname(p, arg2));
11438         unlock_user(p, arg1, 0);
11439         return ret;
11440     case TARGET_NR_uname:
11441         /* No need to transcode the result because we use the Linux syscall directly. */
11442         {
11443             struct new_utsname * buf;
11444 
11445             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11446                 return -TARGET_EFAULT;
11447             ret = get_errno(sys_uname(buf));
11448             if (!is_error(ret)) {
11449                 /* Overwrite the native machine name with whatever is being
11450                    emulated. */
11451                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11452                           sizeof(buf->machine));
11453                 /* Allow the user to override the reported release.  */
11454                 if (qemu_uname_release && *qemu_uname_release) {
11455                     g_strlcpy(buf->release, qemu_uname_release,
11456                               sizeof(buf->release));
11457                 }
11458             }
11459             unlock_user_struct(buf, arg1, 1);
11460         }
11461         return ret;
11462 #ifdef TARGET_I386
11463     case TARGET_NR_modify_ldt:
11464         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11465 #if !defined(TARGET_X86_64)
11466     case TARGET_NR_vm86:
11467         return do_vm86(cpu_env, arg1, arg2);
11468 #endif
11469 #endif
11470 #if defined(TARGET_NR_adjtimex)
11471     case TARGET_NR_adjtimex:
11472         {
11473             struct timex host_buf;
11474 
11475             if (target_to_host_timex(&host_buf, arg1) != 0) {
11476                 return -TARGET_EFAULT;
11477             }
11478             ret = get_errno(adjtimex(&host_buf));
11479             if (!is_error(ret)) {
11480                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11481                     return -TARGET_EFAULT;
11482                 }
11483             }
11484         }
11485         return ret;
11486 #endif
11487 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11488     case TARGET_NR_clock_adjtime:
11489         {
11490             struct timex htx;
11491 
11492             if (target_to_host_timex(&htx, arg2) != 0) {
11493                 return -TARGET_EFAULT;
11494             }
11495             ret = get_errno(clock_adjtime(arg1, &htx));
11496             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11497                 return -TARGET_EFAULT;
11498             }
11499         }
11500         return ret;
11501 #endif
11502 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11503     case TARGET_NR_clock_adjtime64:
11504         {
11505             struct timex htx;
11506 
11507             if (target_to_host_timex64(&htx, arg2) != 0) {
11508                 return -TARGET_EFAULT;
11509             }
11510             ret = get_errno(clock_adjtime(arg1, &htx));
11511             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11512                 return -TARGET_EFAULT;
11513             }
11514         }
11515         return ret;
11516 #endif
11517     case TARGET_NR_getpgid:
11518         return get_errno(getpgid(arg1));
11519     case TARGET_NR_fchdir:
11520         return get_errno(fchdir(arg1));
11521     case TARGET_NR_personality:
11522         return get_errno(personality(arg1));
11523 #ifdef TARGET_NR__llseek /* Not on alpha */
11524     case TARGET_NR__llseek:
11525         {
11526             int64_t res;
11527 #if !defined(__NR_llseek)
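            /*
             * Without a host __NR_llseek (e.g. 64-bit hosts) a plain lseek()
             * is enough: combine the high (arg2) and low (arg3) halves into
             * one 64-bit offset.
             */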
11528             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11529             if (res == -1) {
11530                 ret = get_errno(res);
11531             } else {
11532                 ret = 0;
11533             }
11534 #else
11535             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11536 #endif
11537             if ((ret == 0) && put_user_s64(res, arg4)) {
11538                 return -TARGET_EFAULT;
11539             }
11540         }
11541         return ret;
11542 #endif
11543 #ifdef TARGET_NR_getdents
11544     case TARGET_NR_getdents:
11545         return do_getdents(arg1, arg2, arg3);
11546 #endif /* TARGET_NR_getdents */
11547 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11548     case TARGET_NR_getdents64:
11549         return do_getdents64(arg1, arg2, arg3);
11550 #endif /* TARGET_NR_getdents64 */
11551 #if defined(TARGET_NR__newselect)
11552     case TARGET_NR__newselect:
11553         return do_select(arg1, arg2, arg3, arg4, arg5);
11554 #endif
11555 #ifdef TARGET_NR_poll
11556     case TARGET_NR_poll:
11557         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11558 #endif
11559 #ifdef TARGET_NR_ppoll
11560     case TARGET_NR_ppoll:
11561         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11562 #endif
11563 #ifdef TARGET_NR_ppoll_time64
11564     case TARGET_NR_ppoll_time64:
11565         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11566 #endif
11567     case TARGET_NR_flock:
11568         /* NOTE: the flock constants seem to be the same for every
11569            Linux platform */
11570         return get_errno(safe_flock(arg1, arg2));
11571     case TARGET_NR_readv:
11572         {
11573             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11574             if (vec != NULL) {
11575                 ret = get_errno(safe_readv(arg1, vec, arg3));
11576                 unlock_iovec(vec, arg2, arg3, 1);
11577             } else {
11578                 ret = -host_to_target_errno(errno);
11579             }
11580         }
11581         return ret;
11582     case TARGET_NR_writev:
11583         {
11584             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11585             if (vec != NULL) {
11586                 ret = get_errno(safe_writev(arg1, vec, arg3));
11587                 unlock_iovec(vec, arg2, arg3, 0);
11588             } else {
11589                 ret = -host_to_target_errno(errno);
11590             }
11591         }
11592         return ret;
11593 #if defined(TARGET_NR_preadv)
11594     case TARGET_NR_preadv:
11595         {
11596             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11597             if (vec != NULL) {
11598                 unsigned long low, high;
11599 
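                /*
                 * The guest passes the 64-bit file offset split across two
                 * registers; convert that pair into the low/high words the
                 * host preadv syscall expects.
                 */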
11600                 target_to_host_low_high(arg4, arg5, &low, &high);
11601                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11602                 unlock_iovec(vec, arg2, arg3, 1);
11603             } else {
11604                 ret = -host_to_target_errno(errno);
11605             }
11606         }
11607         return ret;
11608 #endif
11609 #if defined(TARGET_NR_pwritev)
11610     case TARGET_NR_pwritev:
11611         {
11612             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11613             if (vec != NULL) {
11614                 unsigned long low, high;
11615 
11616                 target_to_host_low_high(arg4, arg5, &low, &high);
11617                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11618                 unlock_iovec(vec, arg2, arg3, 0);
11619             } else {
11620                 ret = -host_to_target_errno(errno);
11621            }
11622             }
11623         return ret;
11624 #endif
11625     case TARGET_NR_getsid:
11626         return get_errno(getsid(arg1));
11627 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11628     case TARGET_NR_fdatasync:
11629         return get_errno(fdatasync(arg1));
11630 #endif
11631     case TARGET_NR_sched_getaffinity:
11632         {
11633             unsigned int mask_size;
11634             unsigned long *mask;
11635 
11636             /*
11637              * sched_getaffinity needs multiples of ulong, so need to take
11638              * care of mismatches between target ulong and host ulong sizes.
11639              */
11640             if (arg2 & (sizeof(abi_ulong) - 1)) {
11641                 return -TARGET_EINVAL;
11642             }
11643             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11644 
11645             mask = alloca(mask_size);
11646             memset(mask, 0, mask_size);
11647             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11648 
11649             if (!is_error(ret)) {
11650                 if (ret > arg2) {
11651                     /* More data returned than the caller's buffer will fit.
11652                      * This only happens if sizeof(abi_long) < sizeof(long)
11653                      * and the caller passed us a buffer holding an odd number
11654                      * of abi_longs. If the host kernel is actually using the
11655                      * extra 4 bytes then fail EINVAL; otherwise we can just
11656                      * ignore them and only copy the interesting part.
11657                      */
11658                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11659                     if (numcpus > arg2 * 8) {
11660                         return -TARGET_EINVAL;
11661                     }
11662                     ret = arg2;
11663                 }
11664 
11665                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11666                     return -TARGET_EFAULT;
11667                 }
11668             }
11669         }
11670         return ret;
11671     case TARGET_NR_sched_setaffinity:
11672         {
11673             unsigned int mask_size;
11674             unsigned long *mask;
11675 
11676             /*
11677              * sched_setaffinity needs multiples of ulong, so need to take
11678              * care of mismatches between target ulong and host ulong sizes.
11679              */
11680             if (arg2 & (sizeof(abi_ulong) - 1)) {
11681                 return -TARGET_EINVAL;
11682             }
11683             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11684             mask = alloca(mask_size);
11685 
11686             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11687             if (ret) {
11688                 return ret;
11689             }
11690 
11691             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11692         }
11693     case TARGET_NR_getcpu:
11694         {
11695             unsigned cpuid, node;
11696             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11697                                        arg2 ? &node : NULL,
11698                                        NULL));
11699             if (is_error(ret)) {
11700                 return ret;
11701             }
11702             if (arg1 && put_user_u32(cpuid, arg1)) {
11703                 return -TARGET_EFAULT;
11704             }
11705             if (arg2 && put_user_u32(node, arg2)) {
11706                 return -TARGET_EFAULT;
11707             }
11708         }
11709         return ret;
11710     case TARGET_NR_sched_setparam:
11711         {
11712             struct target_sched_param *target_schp;
11713             struct sched_param schp;
11714 
11715             if (arg2 == 0) {
11716                 return -TARGET_EINVAL;
11717             }
11718             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11719                 return -TARGET_EFAULT;
11720             }
11721             schp.sched_priority = tswap32(target_schp->sched_priority);
11722             unlock_user_struct(target_schp, arg2, 0);
11723             return get_errno(sys_sched_setparam(arg1, &schp));
11724         }
11725     case TARGET_NR_sched_getparam:
11726         {
11727             struct target_sched_param *target_schp;
11728             struct sched_param schp;
11729 
11730             if (arg2 == 0) {
11731                 return -TARGET_EINVAL;
11732             }
11733             ret = get_errno(sys_sched_getparam(arg1, &schp));
11734             if (!is_error(ret)) {
11735                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11736                     return -TARGET_EFAULT;
11737                 }
11738                 target_schp->sched_priority = tswap32(schp.sched_priority);
11739                 unlock_user_struct(target_schp, arg2, 1);
11740             }
11741         }
11742         return ret;
11743     case TARGET_NR_sched_setscheduler:
11744         {
11745             struct target_sched_param *target_schp;
11746             struct sched_param schp;
11747             if (arg3 == 0) {
11748                 return -TARGET_EINVAL;
11749             }
11750             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11751                 return -TARGET_EFAULT;
11752             }
11753             schp.sched_priority = tswap32(target_schp->sched_priority);
11754             unlock_user_struct(target_schp, arg3, 0);
11755             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11756         }
11757     case TARGET_NR_sched_getscheduler:
11758         return get_errno(sys_sched_getscheduler(arg1));
11759     case TARGET_NR_sched_getattr:
11760         {
11761             struct target_sched_attr *target_scha;
11762             struct sched_attr scha;
11763             if (arg2 == 0) {
11764                 return -TARGET_EINVAL;
11765             }
11766             if (arg3 > sizeof(scha)) {
11767                 arg3 = sizeof(scha);
11768             }
11769             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11770             if (!is_error(ret)) {
11771                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11772                 if (!target_scha) {
11773                     return -TARGET_EFAULT;
11774                 }
11775                 target_scha->size = tswap32(scha.size);
11776                 target_scha->sched_policy = tswap32(scha.sched_policy);
11777                 target_scha->sched_flags = tswap64(scha.sched_flags);
11778                 target_scha->sched_nice = tswap32(scha.sched_nice);
11779                 target_scha->sched_priority = tswap32(scha.sched_priority);
11780                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11781                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11782                 target_scha->sched_period = tswap64(scha.sched_period);
11783                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11784                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11785                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11786                 }
11787                 unlock_user(target_scha, arg2, arg3);
11788             }
11789             return ret;
11790         }
11791     case TARGET_NR_sched_setattr:
11792         {
11793             struct target_sched_attr *target_scha;
11794             struct sched_attr scha;
11795             uint32_t size;
11796             int zeroed;
11797             if (arg2 == 0) {
11798                 return -TARGET_EINVAL;
11799             }
11800             if (get_user_u32(size, arg2)) {
11801                 return -TARGET_EFAULT;
11802             }
11803             if (!size) {
11804                 size = offsetof(struct target_sched_attr, sched_util_min);
11805             }
11806             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11807                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11808                     return -TARGET_EFAULT;
11809                 }
11810                 return -TARGET_E2BIG;
11811             }
11812 
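            /*
             * If the guest struct is larger than the one QEMU knows about,
             * all extra bytes must be zero; otherwise report the size we do
             * support and fail with E2BIG, as the kernel does.
             */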
11813             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11814             if (zeroed < 0) {
11815                 return zeroed;
11816             } else if (zeroed == 0) {
11817                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11818                     return -TARGET_EFAULT;
11819                 }
11820                 return -TARGET_E2BIG;
11821             }
11822             if (size > sizeof(struct target_sched_attr)) {
11823                 size = sizeof(struct target_sched_attr);
11824             }
11825 
11826             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11827             if (!target_scha) {
11828                 return -TARGET_EFAULT;
11829             }
11830             scha.size = size;
11831             scha.sched_policy = tswap32(target_scha->sched_policy);
11832             scha.sched_flags = tswap64(target_scha->sched_flags);
11833             scha.sched_nice = tswap32(target_scha->sched_nice);
11834             scha.sched_priority = tswap32(target_scha->sched_priority);
11835             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11836             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11837             scha.sched_period = tswap64(target_scha->sched_period);
11838             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11839                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11840                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11841             }
11842             unlock_user(target_scha, arg2, 0);
11843             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11844         }
11845     case TARGET_NR_sched_yield:
11846         return get_errno(sched_yield());
11847     case TARGET_NR_sched_get_priority_max:
11848         return get_errno(sched_get_priority_max(arg1));
11849     case TARGET_NR_sched_get_priority_min:
11850         return get_errno(sched_get_priority_min(arg1));
11851 #ifdef TARGET_NR_sched_rr_get_interval
11852     case TARGET_NR_sched_rr_get_interval:
11853         {
11854             struct timespec ts;
11855             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11856             if (!is_error(ret)) {
11857                 ret = host_to_target_timespec(arg2, &ts);
11858             }
11859         }
11860         return ret;
11861 #endif
11862 #ifdef TARGET_NR_sched_rr_get_interval_time64
11863     case TARGET_NR_sched_rr_get_interval_time64:
11864         {
11865             struct timespec ts;
11866             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11867             if (!is_error(ret)) {
11868                 ret = host_to_target_timespec64(arg2, &ts);
11869             }
11870         }
11871         return ret;
11872 #endif
11873 #if defined(TARGET_NR_nanosleep)
11874     case TARGET_NR_nanosleep:
11875         {
11876             struct timespec req, rem;
11877             if (target_to_host_timespec(&req, arg1)) {
11878                 return -TARGET_EFAULT;
11879             }
11880             ret = get_errno(safe_nanosleep(&req, &rem));
11881             if (is_error(ret) && arg2) {
11882                 if (host_to_target_timespec(arg2, &rem)) {
11883                     return -TARGET_EFAULT;
11884                 }
11885             }
11886         }
11887         return ret;
11888 #endif
11889     case TARGET_NR_prctl:
11890         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11892 #ifdef TARGET_NR_arch_prctl
11893     case TARGET_NR_arch_prctl:
11894         return do_arch_prctl(cpu_env, arg1, arg2);
11895 #endif
11896 #ifdef TARGET_NR_pread64
11897     case TARGET_NR_pread64:
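        /*
         * On ABIs that require 64-bit syscall arguments in aligned register
         * pairs a padding slot is inserted, so the offset halves arrive in
         * arg5/arg6 instead of arg4/arg5.
         */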
11898         if (regpairs_aligned(cpu_env, num)) {
11899             arg4 = arg5;
11900             arg5 = arg6;
11901         }
11902         if (arg2 == 0 && arg3 == 0) {
11903             /* Special-case NULL buffer and zero length, which should succeed */
11904             p = 0;
11905         } else {
11906             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11907             if (!p) {
11908                 return -TARGET_EFAULT;
11909             }
11910         }
11911         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11912         unlock_user(p, arg2, ret);
11913         return ret;
11914     case TARGET_NR_pwrite64:
11915         if (regpairs_aligned(cpu_env, num)) {
11916             arg4 = arg5;
11917             arg5 = arg6;
11918         }
11919         if (arg2 == 0 && arg3 == 0) {
11920             /* Special-case NULL buffer and zero length, which should succeed */
11921             p = 0;
11922         } else {
11923             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11924             if (!p) {
11925                 return -TARGET_EFAULT;
11926             }
11927         }
11928         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11929         unlock_user(p, arg2, 0);
11930         return ret;
11931 #endif
11932     case TARGET_NR_getcwd:
11933         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11934             return -TARGET_EFAULT;
11935         ret = get_errno(sys_getcwd1(p, arg2));
11936         unlock_user(p, arg1, ret);
11937         return ret;
11938     case TARGET_NR_capget:
11939     case TARGET_NR_capset:
11940     {
11941         struct target_user_cap_header *target_header;
11942         struct target_user_cap_data *target_data = NULL;
11943         struct __user_cap_header_struct header;
11944         struct __user_cap_data_struct data[2];
11945         struct __user_cap_data_struct *dataptr = NULL;
11946         int i, target_datalen;
11947         int data_items = 1;
11948 
11949         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11950             return -TARGET_EFAULT;
11951         }
11952         header.version = tswap32(target_header->version);
11953         header.pid = tswap32(target_header->pid);
11954 
11955         if (header.version != _LINUX_CAPABILITY_VERSION) {
11956             /* Version 2 and up take a pointer to two user_data structs */
11957             data_items = 2;
11958         }
11959 
11960         target_datalen = sizeof(*target_data) * data_items;
11961 
11962         if (arg2) {
11963             if (num == TARGET_NR_capget) {
11964                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11965             } else {
11966                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11967             }
11968             if (!target_data) {
11969                 unlock_user_struct(target_header, arg1, 0);
11970                 return -TARGET_EFAULT;
11971             }
11972 
11973             if (num == TARGET_NR_capset) {
11974                 for (i = 0; i < data_items; i++) {
11975                     data[i].effective = tswap32(target_data[i].effective);
11976                     data[i].permitted = tswap32(target_data[i].permitted);
11977                     data[i].inheritable = tswap32(target_data[i].inheritable);
11978                 }
11979             }
11980 
11981             dataptr = data;
11982         }
11983 
11984         if (num == TARGET_NR_capget) {
11985             ret = get_errno(capget(&header, dataptr));
11986         } else {
11987             ret = get_errno(capset(&header, dataptr));
11988         }
11989 
11990         /* The kernel always updates version for both capget and capset */
11991         target_header->version = tswap32(header.version);
11992         unlock_user_struct(target_header, arg1, 1);
11993 
11994         if (arg2) {
11995             if (num == TARGET_NR_capget) {
11996                 for (i = 0; i < data_items; i++) {
11997                     target_data[i].effective = tswap32(data[i].effective);
11998                     target_data[i].permitted = tswap32(data[i].permitted);
11999                     target_data[i].inheritable = tswap32(data[i].inheritable);
12000                 }
12001                 unlock_user(target_data, arg2, target_datalen);
12002             } else {
12003                 unlock_user(target_data, arg2, 0);
12004             }
12005         }
12006         return ret;
12007     }
12008     case TARGET_NR_sigaltstack:
12009         return do_sigaltstack(arg1, arg2, cpu_env);
12010 
12011 #ifdef CONFIG_SENDFILE
12012 #ifdef TARGET_NR_sendfile
12013     case TARGET_NR_sendfile:
12014     {
12015         off_t *offp = NULL;
12016         off_t off;
12017         if (arg3) {
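        /*
         * sendfile takes the offset as a target long, while sendfile64
         * below always uses a 64-bit value; in both cases the updated
         * offset is copied back to the guest on success.
         */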
12018             ret = get_user_sal(off, arg3);
12019             if (is_error(ret)) {
12020                 return ret;
12021             }
12022             offp = &off;
12023         }
12024         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
12025         if (!is_error(ret) && arg3) {
12026             abi_long ret2 = put_user_sal(off, arg3);
12027             if (is_error(ret2)) {
12028                 ret = ret2;
12029             }
12030         }
12031         return ret;
12032     }
12033 #endif
12034 #ifdef TARGET_NR_sendfile64
12035     case TARGET_NR_sendfile64:
12036     {
12037         off_t *offp = NULL;
12038         off_t off;
12039         if (arg3) {
12040             ret = get_user_s64(off, arg3);
12041             if (is_error(ret)) {
12042                 return ret;
12043             }
12044             offp = &off;
12045         }
12046         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
12047         if (!is_error(ret) && arg3) {
12048             abi_long ret2 = put_user_s64(off, arg3);
12049             if (is_error(ret2)) {
12050                 ret = ret2;
12051             }
12052         }
12053         return ret;
12054     }
12055 #endif
12056 #endif
12057 #ifdef TARGET_NR_vfork
12058     case TARGET_NR_vfork:
12059         return get_errno(do_fork(cpu_env,
12060                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
12061                          0, 0, 0, 0));
12062 #endif
12063 #ifdef TARGET_NR_ugetrlimit
12064     case TARGET_NR_ugetrlimit:
12065     {
12066         struct rlimit rlim;
12067         int resource = target_to_host_resource(arg1);
12068         ret = get_errno(getrlimit(resource, &rlim));
12069         if (!is_error(ret)) {
12070             struct target_rlimit *target_rlim;
12071             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
12072                 return -TARGET_EFAULT;
12073             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
12074             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
12075             unlock_user_struct(target_rlim, arg2, 1);
12076         }
12077         return ret;
12078     }
12079 #endif
12080 #ifdef TARGET_NR_truncate64
12081     case TARGET_NR_truncate64:
12082         if (!(p = lock_user_string(arg1)))
12083             return -TARGET_EFAULT;
12084         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
12085         unlock_user(p, arg1, 0);
12086         return ret;
12087 #endif
12088 #ifdef TARGET_NR_ftruncate64
12089     case TARGET_NR_ftruncate64:
12090         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
12091 #endif
12092 #ifdef TARGET_NR_stat64
12093     case TARGET_NR_stat64:
12094         if (!(p = lock_user_string(arg1))) {
12095             return -TARGET_EFAULT;
12096         }
12097         ret = get_errno(stat(path(p), &st));
12098         unlock_user(p, arg1, 0);
12099         if (!is_error(ret))
12100             ret = host_to_target_stat64(cpu_env, arg2, &st);
12101         return ret;
12102 #endif
12103 #ifdef TARGET_NR_lstat64
12104     case TARGET_NR_lstat64:
12105         if (!(p = lock_user_string(arg1))) {
12106             return -TARGET_EFAULT;
12107         }
12108         ret = get_errno(lstat(path(p), &st));
12109         unlock_user(p, arg1, 0);
12110         if (!is_error(ret))
12111             ret = host_to_target_stat64(cpu_env, arg2, &st);
12112         return ret;
12113 #endif
12114 #ifdef TARGET_NR_fstat64
12115     case TARGET_NR_fstat64:
12116         ret = get_errno(fstat(arg1, &st));
12117         if (!is_error(ret))
12118             ret = host_to_target_stat64(cpu_env, arg2, &st);
12119         return ret;
12120 #endif
12121 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
12122 #ifdef TARGET_NR_fstatat64
12123     case TARGET_NR_fstatat64:
12124 #endif
12125 #ifdef TARGET_NR_newfstatat
12126     case TARGET_NR_newfstatat:
12127 #endif
12128         if (!(p = lock_user_string(arg2))) {
12129             return -TARGET_EFAULT;
12130         }
12131         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
12132         unlock_user(p, arg2, 0);
12133         if (!is_error(ret))
12134             ret = host_to_target_stat64(cpu_env, arg3, &st);
12135         return ret;
12136 #endif
12137 #if defined(TARGET_NR_statx)
12138     case TARGET_NR_statx:
12139         {
12140             struct target_statx *target_stx;
12141             int dirfd = arg1;
12142             int flags = arg3;
12143 
12144             p = lock_user_string(arg2);
12145             if (p == NULL) {
12146                 return -TARGET_EFAULT;
12147             }
12148 #if defined(__NR_statx)
12149             {
12150                 /*
12151                  * It is assumed that struct statx is architecture independent.
12152                  */
12153                 struct target_statx host_stx;
12154                 int mask = arg4;
12155 
12156                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
12157                 if (!is_error(ret)) {
12158                     if (host_to_target_statx(&host_stx, arg5) != 0) {
12159                         unlock_user(p, arg2, 0);
12160                         return -TARGET_EFAULT;
12161                     }
12162                 }
12163 
12164                 if (ret != -TARGET_ENOSYS) {
12165                     unlock_user(p, arg2, 0);
12166                     return ret;
12167                 }
12168             }
12169 #endif
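            /*
             * We get here either because the host has no statx() syscall or
             * because it returned ENOSYS; emulate statx with fstatat() and
             * fill in the basic stx_* fields below.
             */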
12170             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
12171             unlock_user(p, arg2, 0);
12172 
12173             if (!is_error(ret)) {
12174                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
12175                     return -TARGET_EFAULT;
12176                 }
12177                 memset(target_stx, 0, sizeof(*target_stx));
12178                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
12179                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
12180                 __put_user(st.st_ino, &target_stx->stx_ino);
12181                 __put_user(st.st_mode, &target_stx->stx_mode);
12182                 __put_user(st.st_uid, &target_stx->stx_uid);
12183                 __put_user(st.st_gid, &target_stx->stx_gid);
12184                 __put_user(st.st_nlink, &target_stx->stx_nlink);
12185                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
12186                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
12187                 __put_user(st.st_size, &target_stx->stx_size);
12188                 __put_user(st.st_blksize, &target_stx->stx_blksize);
12189                 __put_user(st.st_blocks, &target_stx->stx_blocks);
12190                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
12191                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
12192                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
12193                 unlock_user_struct(target_stx, arg5, 1);
12194             }
12195         }
12196         return ret;
12197 #endif
12198 #ifdef TARGET_NR_lchown
12199     case TARGET_NR_lchown:
12200         if (!(p = lock_user_string(arg1)))
12201             return -TARGET_EFAULT;
12202         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
12203         unlock_user(p, arg1, 0);
12204         return ret;
12205 #endif
12206 #ifdef TARGET_NR_getuid
12207     case TARGET_NR_getuid:
12208         return get_errno(high2lowuid(getuid()));
12209 #endif
12210 #ifdef TARGET_NR_getgid
12211     case TARGET_NR_getgid:
12212         return get_errno(high2lowgid(getgid()));
12213 #endif
12214 #ifdef TARGET_NR_geteuid
12215     case TARGET_NR_geteuid:
12216         return get_errno(high2lowuid(geteuid()));
12217 #endif
12218 #ifdef TARGET_NR_getegid
12219     case TARGET_NR_getegid:
12220         return get_errno(high2lowgid(getegid()));
12221 #endif
12222     case TARGET_NR_setreuid:
12223         return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
12224     case TARGET_NR_setregid:
12225         return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
12226     case TARGET_NR_getgroups:
12227         { /* the same code as for TARGET_NR_getgroups32 */
12228             int gidsetsize = arg1;
12229             target_id *target_grouplist;
12230             g_autofree gid_t *grouplist = NULL;
12231             int i;
12232 
12233             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12234                 return -TARGET_EINVAL;
12235             }
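            /* A gidsetsize of 0 only queries the number of groups. */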
12236             if (gidsetsize > 0) {
12237                 grouplist = g_try_new(gid_t, gidsetsize);
12238                 if (!grouplist) {
12239                     return -TARGET_ENOMEM;
12240                 }
12241             }
12242             ret = get_errno(getgroups(gidsetsize, grouplist));
12243             if (!is_error(ret) && gidsetsize > 0) {
12244                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12245                                              gidsetsize * sizeof(target_id), 0);
12246                 if (!target_grouplist) {
12247                     return -TARGET_EFAULT;
12248                 }
12249                 for (i = 0; i < ret; i++) {
12250                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
12251                 }
12252                 unlock_user(target_grouplist, arg2,
12253                             gidsetsize * sizeof(target_id));
12254             }
12255             return ret;
12256         }
12257     case TARGET_NR_setgroups:
12258         { /* the same code as for TARGET_NR_setgroups32 */
12259             int gidsetsize = arg1;
12260             target_id *target_grouplist;
12261             g_autofree gid_t *grouplist = NULL;
12262             int i;
12263 
12264             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12265                 return -TARGET_EINVAL;
12266             }
12267             if (gidsetsize > 0) {
12268                 grouplist = g_try_new(gid_t, gidsetsize);
12269                 if (!grouplist) {
12270                     return -TARGET_ENOMEM;
12271                 }
12272                 target_grouplist = lock_user(VERIFY_READ, arg2,
12273                                              gidsetsize * sizeof(target_id), 1);
12274                 if (!target_grouplist) {
12275                     return -TARGET_EFAULT;
12276                 }
12277                 for (i = 0; i < gidsetsize; i++) {
12278                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
12279                 }
12280                 unlock_user(target_grouplist, arg2,
12281                             gidsetsize * sizeof(target_id));
12282             }
12283             return get_errno(sys_setgroups(gidsetsize, grouplist));
12284         }
12285     case TARGET_NR_fchown:
12286         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
12287 #if defined(TARGET_NR_fchownat)
12288     case TARGET_NR_fchownat:
12289         if (!(p = lock_user_string(arg2)))
12290             return -TARGET_EFAULT;
12291         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12292                                  low2highgid(arg4), arg5));
12293         unlock_user(p, arg2, 0);
12294         return ret;
12295 #endif
12296 #ifdef TARGET_NR_setresuid
12297     case TARGET_NR_setresuid:
12298         return get_errno(sys_setresuid(low2highuid(arg1),
12299                                        low2highuid(arg2),
12300                                        low2highuid(arg3)));
12301 #endif
12302 #ifdef TARGET_NR_getresuid
12303     case TARGET_NR_getresuid:
12304         {
12305             uid_t ruid, euid, suid;
12306             ret = get_errno(getresuid(&ruid, &euid, &suid));
12307             if (!is_error(ret)) {
12308                 if (put_user_id(high2lowuid(ruid), arg1)
12309                     || put_user_id(high2lowuid(euid), arg2)
12310                     || put_user_id(high2lowuid(suid), arg3))
12311                     return -TARGET_EFAULT;
12312             }
12313         }
12314         return ret;
12315 #endif
12316 #ifdef TARGET_NR_setresgid
12317     case TARGET_NR_setresgid:
12318         return get_errno(sys_setresgid(low2highgid(arg1),
12319                                        low2highgid(arg2),
12320                                        low2highgid(arg3)));
12321 #endif
12322 #ifdef TARGET_NR_getresgid
12323     case TARGET_NR_getresgid:
12324         {
12325             gid_t rgid, egid, sgid;
12326             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12327             if (!is_error(ret)) {
12328                 if (put_user_id(high2lowgid(rgid), arg1)
12329                     || put_user_id(high2lowgid(egid), arg2)
12330                     || put_user_id(high2lowgid(sgid), arg3))
12331                     return -TARGET_EFAULT;
12332             }
12333         }
12334         return ret;
12335 #endif
12336 #ifdef TARGET_NR_chown
12337     case TARGET_NR_chown:
12338         if (!(p = lock_user_string(arg1)))
12339             return -TARGET_EFAULT;
12340         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12341         unlock_user(p, arg1, 0);
12342         return ret;
12343 #endif
12344     case TARGET_NR_setuid:
12345         return get_errno(sys_setuid(low2highuid(arg1)));
12346     case TARGET_NR_setgid:
12347         return get_errno(sys_setgid(low2highgid(arg1)));
12348     case TARGET_NR_setfsuid:
12349         return get_errno(setfsuid(arg1));
12350     case TARGET_NR_setfsgid:
12351         return get_errno(setfsgid(arg1));
12352 
12353 #ifdef TARGET_NR_lchown32
12354     case TARGET_NR_lchown32:
12355         if (!(p = lock_user_string(arg1)))
12356             return -TARGET_EFAULT;
12357         ret = get_errno(lchown(p, arg2, arg3));
12358         unlock_user(p, arg1, 0);
12359         return ret;
12360 #endif
12361 #ifdef TARGET_NR_getuid32
12362     case TARGET_NR_getuid32:
12363         return get_errno(getuid());
12364 #endif
12365 
12366 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12367    /* Alpha specific */
12368     case TARGET_NR_getxuid:
12369         {
12370             uid_t euid;
12371             euid = geteuid();
12372             cpu_env->ir[IR_A4] = euid;
12373         }
12374         return get_errno(getuid());
12375 #endif
12376 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12377    /* Alpha specific */
12378     case TARGET_NR_getxgid:
12379         {
12380             gid_t egid;
12381             egid = getegid();
12382             cpu_env->ir[IR_A4] = egid;
12383         }
12384         return get_errno(getgid());
12385 #endif
12386 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12387     /* Alpha specific */
12388     case TARGET_NR_osf_getsysinfo:
12389         ret = -TARGET_EOPNOTSUPP;
12390         switch (arg1) {
12391           case TARGET_GSI_IEEE_FP_CONTROL:
12392             {
12393                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12394                 uint64_t swcr = cpu_env->swcr;
12395 
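                /* Fold the current fpcr status bits into the reported swcr. */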
12396                 swcr &= ~SWCR_STATUS_MASK;
12397                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12398 
12399                 if (put_user_u64(swcr, arg2))
12400                     return -TARGET_EFAULT;
12401                 ret = 0;
12402             }
12403             break;
12404 
12405           /* case GSI_IEEE_STATE_AT_SIGNAL:
12406              -- Not implemented in linux kernel.
12407              case GSI_UACPROC:
12408              -- Retrieves current unaligned access state; not much used.
12409              case GSI_PROC_TYPE:
12410              -- Retrieves implver information; surely not used.
12411              case GSI_GET_HWRPB:
12412              -- Grabs a copy of the HWRPB; surely not used.
12413           */
12414         }
12415         return ret;
12416 #endif
12417 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12418     /* Alpha specific */
12419     case TARGET_NR_osf_setsysinfo:
12420         ret = -TARGET_EOPNOTSUPP;
12421         switch (arg1) {
12422           case TARGET_SSI_IEEE_FP_CONTROL:
12423             {
12424                 uint64_t swcr, fpcr;
12425 
12426                 if (get_user_u64(swcr, arg2)) {
12427                     return -TARGET_EFAULT;
12428                 }
12429 
12430                 /*
12431                  * The kernel calls swcr_update_status to update the
12432                  * status bits from the fpcr at every point that it
12433                  * could be queried.  Therefore, we store the status
12434                  * bits only in FPCR.
12435                  */
12436                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12437 
12438                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12439                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12440                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12441                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12442                 ret = 0;
12443             }
12444             break;
12445 
12446           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12447             {
12448                 uint64_t exc, fpcr, fex;
12449 
12450                 if (get_user_u64(exc, arg2)) {
12451                     return -TARGET_EFAULT;
12452                 }
12453                 exc &= SWCR_STATUS_MASK;
12454                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12455 
12456                 /* Old exceptions are not signaled.  */
12457                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12458                 fex = exc & ~fex;
12459                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12460                 fex &= (cpu_env)->swcr;
12461 
12462                 /* Update the hardware fpcr.  */
12463                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12464                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12465 
12466                 if (fex) {
12467                     int si_code = TARGET_FPE_FLTUNK;
12468                     target_siginfo_t info;
12469 
12470                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12471                         si_code = TARGET_FPE_FLTUND;
12472                     }
12473                     if (fex & SWCR_TRAP_ENABLE_INE) {
12474                         si_code = TARGET_FPE_FLTRES;
12475                     }
12476                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12477                         si_code = TARGET_FPE_FLTUND;
12478                     }
12479                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12480                         si_code = TARGET_FPE_FLTOVF;
12481                     }
12482                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12483                         si_code = TARGET_FPE_FLTDIV;
12484                     }
12485                     if (fex & SWCR_TRAP_ENABLE_INV) {
12486                         si_code = TARGET_FPE_FLTINV;
12487                     }
12488 
12489                     info.si_signo = SIGFPE;
12490                     info.si_errno = 0;
12491                     info.si_code = si_code;
12492                     info._sifields._sigfault._addr = (cpu_env)->pc;
12493                     queue_signal(cpu_env, info.si_signo,
12494                                  QEMU_SI_FAULT, &info);
12495                 }
12496                 ret = 0;
12497             }
12498             break;
12499 
12500           /* case SSI_NVPAIRS:
12501              -- Used with SSIN_UACPROC to enable unaligned accesses.
12502              case SSI_IEEE_STATE_AT_SIGNAL:
12503              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12504              -- Not implemented in linux kernel
12505           */
12506         }
12507         return ret;
12508 #endif
12509 #ifdef TARGET_NR_osf_sigprocmask
12510     /* Alpha specific.  */
12511     case TARGET_NR_osf_sigprocmask:
12512         {
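            /* Unlike sigprocmask(2), the old mask is returned as the result. */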
12513             abi_ulong mask;
12514             int how;
12515             sigset_t set, oldset;
12516 
12517             switch (arg1) {
12518             case TARGET_SIG_BLOCK:
12519                 how = SIG_BLOCK;
12520                 break;
12521             case TARGET_SIG_UNBLOCK:
12522                 how = SIG_UNBLOCK;
12523                 break;
12524             case TARGET_SIG_SETMASK:
12525                 how = SIG_SETMASK;
12526                 break;
12527             default:
12528                 return -TARGET_EINVAL;
12529             }
12530             mask = arg2;
12531             target_to_host_old_sigset(&set, &mask);
12532             ret = do_sigprocmask(how, &set, &oldset);
12533             if (!ret) {
12534                 host_to_target_old_sigset(&mask, &oldset);
12535                 ret = mask;
12536             }
12537         }
12538         return ret;
12539 #endif
12540 
12541 #ifdef TARGET_NR_getgid32
12542     case TARGET_NR_getgid32:
12543         return get_errno(getgid());
12544 #endif
12545 #ifdef TARGET_NR_geteuid32
12546     case TARGET_NR_geteuid32:
12547         return get_errno(geteuid());
12548 #endif
12549 #ifdef TARGET_NR_getegid32
12550     case TARGET_NR_getegid32:
12551         return get_errno(getegid());
12552 #endif
12553 #ifdef TARGET_NR_setreuid32
12554     case TARGET_NR_setreuid32:
12555         return get_errno(sys_setreuid(arg1, arg2));
12556 #endif
12557 #ifdef TARGET_NR_setregid32
12558     case TARGET_NR_setregid32:
12559         return get_errno(sys_setregid(arg1, arg2));
12560 #endif
12561 #ifdef TARGET_NR_getgroups32
12562     case TARGET_NR_getgroups32:
12563         { /* the same code as for TARGET_NR_getgroups */
12564             int gidsetsize = arg1;
12565             uint32_t *target_grouplist;
12566             g_autofree gid_t *grouplist = NULL;
12567             int i;
12568 
12569             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12570                 return -TARGET_EINVAL;
12571             }
12572             if (gidsetsize > 0) {
12573                 grouplist = g_try_new(gid_t, gidsetsize);
12574                 if (!grouplist) {
12575                     return -TARGET_ENOMEM;
12576                 }
12577             }
12578             ret = get_errno(getgroups(gidsetsize, grouplist));
12579             if (!is_error(ret) && gidsetsize > 0) {
12580                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12581                                              gidsetsize * 4, 0);
12582                 if (!target_grouplist) {
12583                     return -TARGET_EFAULT;
12584                 }
12585                 for (i = 0; i < ret; i++) {
12586                     target_grouplist[i] = tswap32(grouplist[i]);
12587                 }
12588                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12589             }
12590             return ret;
12591         }
12592 #endif
12593 #ifdef TARGET_NR_setgroups32
12594     case TARGET_NR_setgroups32:
12595         { /* the same code as for TARGET_NR_setgroups */
12596             int gidsetsize = arg1;
12597             uint32_t *target_grouplist;
12598             g_autofree gid_t *grouplist = NULL;
12599             int i;
12600 
12601             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12602                 return -TARGET_EINVAL;
12603             }
12604             if (gidsetsize > 0) {
12605                 grouplist = g_try_new(gid_t, gidsetsize);
12606                 if (!grouplist) {
12607                     return -TARGET_ENOMEM;
12608                 }
12609                 target_grouplist = lock_user(VERIFY_READ, arg2,
12610                                              gidsetsize * 4, 1);
12611                 if (!target_grouplist) {
12612                     return -TARGET_EFAULT;
12613                 }
12614                 for (i = 0; i < gidsetsize; i++) {
12615                     grouplist[i] = tswap32(target_grouplist[i]);
12616                 }
12617                 unlock_user(target_grouplist, arg2, 0);
12618             }
12619             return get_errno(sys_setgroups(gidsetsize, grouplist));
12620         }
12621 #endif
12622 #ifdef TARGET_NR_fchown32
12623     case TARGET_NR_fchown32:
12624         return get_errno(fchown(arg1, arg2, arg3));
12625 #endif
12626 #ifdef TARGET_NR_setresuid32
12627     case TARGET_NR_setresuid32:
12628         return get_errno(sys_setresuid(arg1, arg2, arg3));
12629 #endif
12630 #ifdef TARGET_NR_getresuid32
12631     case TARGET_NR_getresuid32:
12632         {
12633             uid_t ruid, euid, suid;
12634             ret = get_errno(getresuid(&ruid, &euid, &suid));
12635             if (!is_error(ret)) {
12636                 if (put_user_u32(ruid, arg1)
12637                     || put_user_u32(euid, arg2)
12638                     || put_user_u32(suid, arg3))
12639                     return -TARGET_EFAULT;
12640             }
12641         }
12642         return ret;
12643 #endif
12644 #ifdef TARGET_NR_setresgid32
12645     case TARGET_NR_setresgid32:
12646         return get_errno(sys_setresgid(arg1, arg2, arg3));
12647 #endif
12648 #ifdef TARGET_NR_getresgid32
12649     case TARGET_NR_getresgid32:
12650         {
12651             gid_t rgid, egid, sgid;
12652             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12653             if (!is_error(ret)) {
12654                 if (put_user_u32(rgid, arg1)
12655                     || put_user_u32(egid, arg2)
12656                     || put_user_u32(sgid, arg3))
12657                     return -TARGET_EFAULT;
12658             }
12659         }
12660         return ret;
12661 #endif
12662 #ifdef TARGET_NR_chown32
12663     case TARGET_NR_chown32:
12664         if (!(p = lock_user_string(arg1)))
12665             return -TARGET_EFAULT;
12666         ret = get_errno(chown(p, arg2, arg3));
12667         unlock_user(p, arg1, 0);
12668         return ret;
12669 #endif
12670 #ifdef TARGET_NR_setuid32
12671     case TARGET_NR_setuid32:
12672         return get_errno(sys_setuid(arg1));
12673 #endif
12674 #ifdef TARGET_NR_setgid32
12675     case TARGET_NR_setgid32:
12676         return get_errno(sys_setgid(arg1));
12677 #endif
12678 #ifdef TARGET_NR_setfsuid32
12679     case TARGET_NR_setfsuid32:
12680         return get_errno(setfsuid(arg1));
12681 #endif
12682 #ifdef TARGET_NR_setfsgid32
12683     case TARGET_NR_setfsgid32:
12684         return get_errno(setfsgid(arg1));
12685 #endif
12686 #ifdef TARGET_NR_mincore
12687     case TARGET_NR_mincore:
12688         {
12689             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12690             if (!a) {
12691                 return -TARGET_ENOMEM;
12692             }
12693             p = lock_user_string(arg3);
12694             if (!p) {
12695                 ret = -TARGET_EFAULT;
12696             } else {
12697                 ret = get_errno(mincore(a, arg2, p));
12698                 unlock_user(p, arg3, ret);
12699             }
12700             unlock_user(a, arg1, 0);
12701         }
12702         return ret;
12703 #endif
12704 #ifdef TARGET_NR_arm_fadvise64_64
12705     case TARGET_NR_arm_fadvise64_64:
12706         /* arm_fadvise64_64 looks like fadvise64_64 but
12707          * with different argument order: fd, advice, offset, len
12708          * rather than the usual fd, offset, len, advice.
12709          * Note that offset and len are both 64-bit so appear as
12710          * pairs of 32-bit registers.
12711          */
12712         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12713                             target_offset64(arg5, arg6), arg2);
12714         return -host_to_target_errno(ret);
12715 #endif
12716 
12717 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12718 
12719 #ifdef TARGET_NR_fadvise64_64
12720     case TARGET_NR_fadvise64_64:
12721 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12722         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12723         ret = arg2;
12724         arg2 = arg3;
12725         arg3 = arg4;
12726         arg4 = arg5;
12727         arg5 = arg6;
12728         arg6 = ret;
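        /* The arguments now match the generic fd, offset, len, advice layout. */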
12729 #else
12730         /* 6 args: fd, offset (high, low), len (high, low), advice */
12731         if (regpairs_aligned(cpu_env, num)) {
12732             /* offset is in (3,4), len in (5,6) and advice in 7 */
12733             arg2 = arg3;
12734             arg3 = arg4;
12735             arg4 = arg5;
12736             arg5 = arg6;
12737             arg6 = arg7;
12738         }
12739 #endif
12740         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12741                             target_offset64(arg4, arg5), arg6);
12742         return -host_to_target_errno(ret);
12743 #endif
12744 
12745 #ifdef TARGET_NR_fadvise64
12746     case TARGET_NR_fadvise64:
12747         /* 5 args: fd, offset (high, low), len, advice */
12748         if (regpairs_aligned(cpu_env, num)) {
12749             /* offset is in (3,4), len in 5 and advice in 6 */
12750             arg2 = arg3;
12751             arg3 = arg4;
12752             arg4 = arg5;
12753             arg5 = arg6;
12754         }
12755         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12756         return -host_to_target_errno(ret);
12757 #endif
12758 
12759 #else /* not a 32-bit ABI */
12760 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12761 #ifdef TARGET_NR_fadvise64_64
12762     case TARGET_NR_fadvise64_64:
12763 #endif
12764 #ifdef TARGET_NR_fadvise64
12765     case TARGET_NR_fadvise64:
12766 #endif
12767 #ifdef TARGET_S390X
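        /*
         * The s390x ABI uses 6 and 7 for POSIX_FADV_DONTNEED and
         * POSIX_FADV_NOREUSE, so remap those to the host constants and turn
         * the unused values 4 and 5 into deliberately invalid ones.
         */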
12768         switch (arg4) {
12769         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12770         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12771         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12772         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12773         default: break;
12774         }
12775 #endif
12776         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12777 #endif
12778 #endif /* end of 64-bit ABI fadvise handling */
12779 
12780 #ifdef TARGET_NR_madvise
12781     case TARGET_NR_madvise:
12782         return target_madvise(arg1, arg2, arg3);
12783 #endif
12784 #ifdef TARGET_NR_fcntl64
12785     case TARGET_NR_fcntl64:
12786     {
12787         int cmd;
12788         struct flock fl;
12789         from_flock64_fn *copyfrom = copy_from_user_flock64;
12790         to_flock64_fn *copyto = copy_to_user_flock64;
12791 
12792 #ifdef TARGET_ARM
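        /* The old ARM OABI lays out struct flock64 differently from EABI. */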
12793         if (!cpu_env->eabi) {
12794             copyfrom = copy_from_user_oabi_flock64;
12795             copyto = copy_to_user_oabi_flock64;
12796         }
12797 #endif
12798 
12799         cmd = target_to_host_fcntl_cmd(arg2);
12800         if (cmd == -TARGET_EINVAL) {
12801             return cmd;
12802         }
12803 
12804         switch (arg2) {
12805         case TARGET_F_GETLK64:
12806             ret = copyfrom(&fl, arg3);
12807             if (ret) {
12808                 break;
12809             }
12810             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12811             if (ret == 0) {
12812                 ret = copyto(arg3, &fl);
12813             }
12814             break;
12815 
12816         case TARGET_F_SETLK64:
12817         case TARGET_F_SETLKW64:
12818             ret = copyfrom(&fl, arg3);
12819             if (ret) {
12820                 break;
12821             }
12822             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12823             break;
12824         default:
12825             ret = do_fcntl(arg1, arg2, arg3);
12826             break;
12827         }
12828         return ret;
12829     }
12830 #endif
12831 #ifdef TARGET_NR_cacheflush
12832     case TARGET_NR_cacheflush:
12833         /* self-modifying code is handled automatically, so nothing needed */
12834         return 0;
12835 #endif
12836 #ifdef TARGET_NR_getpagesize
12837     case TARGET_NR_getpagesize:
12838         return TARGET_PAGE_SIZE;
12839 #endif
12840     case TARGET_NR_gettid:
12841         return get_errno(sys_gettid());
12842 #ifdef TARGET_NR_readahead
12843     case TARGET_NR_readahead:
12844 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
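        /*
         * On 32-bit ABIs the 64-bit offset arrives as a register pair, shifted
         * by one slot when the ABI requires aligned pairs.
         */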
12845         if (regpairs_aligned(cpu_env, num)) {
12846             arg2 = arg3;
12847             arg3 = arg4;
12848             arg4 = arg5;
12849         }
12850         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12851 #else
12852         ret = get_errno(readahead(arg1, arg2, arg3));
12853 #endif
12854         return ret;
12855 #endif
12856 #ifdef CONFIG_ATTR
12857 #ifdef TARGET_NR_setxattr
12858     case TARGET_NR_listxattr:
12859     case TARGET_NR_llistxattr:
12860     {
12861         void *b = 0;
12862         if (arg2) {
12863             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12864             if (!b) {
12865                 return -TARGET_EFAULT;
12866             }
12867         }
12868         p = lock_user_string(arg1);
12869         if (p) {
12870             if (num == TARGET_NR_listxattr) {
12871                 ret = get_errno(listxattr(p, b, arg3));
12872             } else {
12873                 ret = get_errno(llistxattr(p, b, arg3));
12874             }
12875         } else {
12876             ret = -TARGET_EFAULT;
12877         }
12878         unlock_user(p, arg1, 0);
12879         unlock_user(b, arg2, arg3);
12880         return ret;
12881     }
12882     case TARGET_NR_flistxattr:
12883     {
12884         void *b = 0;
12885         if (arg2) {
12886             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12887             if (!b) {
12888                 return -TARGET_EFAULT;
12889             }
12890         }
12891         ret = get_errno(flistxattr(arg1, b, arg3));
12892         unlock_user(b, arg2, arg3);
12893         return ret;
12894     }
12895     case TARGET_NR_setxattr:
12896     case TARGET_NR_lsetxattr:
12897         {
12898             void *n, *v = 0;
12899             if (arg3) {
12900                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12901                 if (!v) {
12902                     return -TARGET_EFAULT;
12903                 }
12904             }
12905             p = lock_user_string(arg1);
12906             n = lock_user_string(arg2);
12907             if (p && n) {
12908                 if (num == TARGET_NR_setxattr) {
12909                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12910                 } else {
12911                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12912                 }
12913             } else {
12914                 ret = -TARGET_EFAULT;
12915             }
12916             unlock_user(p, arg1, 0);
12917             unlock_user(n, arg2, 0);
12918             unlock_user(v, arg3, 0);
12919         }
12920         return ret;
12921     case TARGET_NR_fsetxattr:
12922         {
12923             void *n, *v = 0;
12924             if (arg3) {
12925                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12926                 if (!v) {
12927                     return -TARGET_EFAULT;
12928                 }
12929             }
12930             n = lock_user_string(arg2);
12931             if (n) {
12932                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12933             } else {
12934                 ret = -TARGET_EFAULT;
12935             }
12936             unlock_user(n, arg2, 0);
12937             unlock_user(v, arg3, 0);
12938         }
12939         return ret;
12940     case TARGET_NR_getxattr:
12941     case TARGET_NR_lgetxattr:
12942         {
12943             void *n, *v = 0;
12944             if (arg3) {
12945                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12946                 if (!v) {
12947                     return -TARGET_EFAULT;
12948                 }
12949             }
12950             p = lock_user_string(arg1);
12951             n = lock_user_string(arg2);
12952             if (p && n) {
12953                 if (num == TARGET_NR_getxattr) {
12954                     ret = get_errno(getxattr(p, n, v, arg4));
12955                 } else {
12956                     ret = get_errno(lgetxattr(p, n, v, arg4));
12957                 }
12958             } else {
12959                 ret = -TARGET_EFAULT;
12960             }
12961             unlock_user(p, arg1, 0);
12962             unlock_user(n, arg2, 0);
12963             unlock_user(v, arg3, arg4);
12964         }
12965         return ret;
12966     case TARGET_NR_fgetxattr:
12967         {
12968             void *n, *v = 0;
12969             if (arg3) {
12970                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12971                 if (!v) {
12972                     return -TARGET_EFAULT;
12973                 }
12974             }
12975             n = lock_user_string(arg2);
12976             if (n) {
12977                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12978             } else {
12979                 ret = -TARGET_EFAULT;
12980             }
12981             unlock_user(n, arg2, 0);
12982             unlock_user(v, arg3, arg4);
12983         }
12984         return ret;
12985     case TARGET_NR_removexattr:
12986     case TARGET_NR_lremovexattr:
12987         {
12988             void *n;
12989             p = lock_user_string(arg1);
12990             n = lock_user_string(arg2);
12991             if (p && n) {
12992                 if (num == TARGET_NR_removexattr) {
12993                     ret = get_errno(removexattr(p, n));
12994                 } else {
12995                     ret = get_errno(lremovexattr(p, n));
12996                 }
12997             } else {
12998                 ret = -TARGET_EFAULT;
12999             }
13000             unlock_user(p, arg1, 0);
13001             unlock_user(n, arg2, 0);
13002         }
13003         return ret;
13004     case TARGET_NR_fremovexattr:
13005         {
13006             void *n;
13007             n = lock_user_string(arg2);
13008             if (n) {
13009                 ret = get_errno(fremovexattr(arg1, n));
13010             } else {
13011                 ret = -TARGET_EFAULT;
13012             }
13013             unlock_user(n, arg2, 0);
13014         }
13015         return ret;
13016 #endif
13017 #endif /* CONFIG_ATTR */
13018 #ifdef TARGET_NR_set_thread_area
13019     case TARGET_NR_set_thread_area:
13020 #if defined(TARGET_MIPS)
13021         cpu_env->active_tc.CP0_UserLocal = arg1;
13022         return 0;
13023 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
13024         return do_set_thread_area(cpu_env, arg1);
13025 #elif defined(TARGET_M68K)
13026         {
13027             TaskState *ts = get_task_state(cpu);
13028             ts->tp_value = arg1;
13029             return 0;
13030         }
13031 #else
13032         return -TARGET_ENOSYS;
13033 #endif
13034 #endif
13035 #ifdef TARGET_NR_get_thread_area
13036     case TARGET_NR_get_thread_area:
13037 #if defined(TARGET_I386) && defined(TARGET_ABI32)
13038         return do_get_thread_area(cpu_env, arg1);
13039 #elif defined(TARGET_M68K)
13040         {
13041             TaskState *ts = get_task_state(cpu);
13042             return ts->tp_value;
13043         }
13044 #else
13045         return -TARGET_ENOSYS;
13046 #endif
13047 #endif
13048 #ifdef TARGET_NR_getdomainname
13049     case TARGET_NR_getdomainname:
13050         return -TARGET_ENOSYS;
13051 #endif
13052 
13053 #ifdef TARGET_NR_clock_settime
13054     case TARGET_NR_clock_settime:
13055     {
13056         struct timespec ts;
13057 
13058         ret = target_to_host_timespec(&ts, arg2);
13059         if (!is_error(ret)) {
13060             ret = get_errno(clock_settime(arg1, &ts));
13061         }
13062         return ret;
13063     }
13064 #endif
13065 #ifdef TARGET_NR_clock_settime64
13066     case TARGET_NR_clock_settime64:
13067     {
13068         struct timespec ts;
13069 
13070         ret = target_to_host_timespec64(&ts, arg2);
13071         if (!is_error(ret)) {
13072             ret = get_errno(clock_settime(arg1, &ts));
13073         }
13074         return ret;
13075     }
13076 #endif
13077 #ifdef TARGET_NR_clock_gettime
13078     case TARGET_NR_clock_gettime:
13079     {
13080         struct timespec ts;
13081         ret = get_errno(clock_gettime(arg1, &ts));
13082         if (!is_error(ret)) {
13083             ret = host_to_target_timespec(arg2, &ts);
13084         }
13085         return ret;
13086     }
13087 #endif
13088 #ifdef TARGET_NR_clock_gettime64
13089     case TARGET_NR_clock_gettime64:
13090     {
13091         struct timespec ts;
13092         ret = get_errno(clock_gettime(arg1, &ts));
13093         if (!is_error(ret)) {
13094             ret = host_to_target_timespec64(arg2, &ts);
13095         }
13096         return ret;
13097     }
13098 #endif
13099 #ifdef TARGET_NR_clock_getres
13100     case TARGET_NR_clock_getres:
13101     {
13102         struct timespec ts;
13103         ret = get_errno(clock_getres(arg1, &ts));
13104         if (!is_error(ret)) {
13105             host_to_target_timespec(arg2, &ts);
13106         }
13107         return ret;
13108     }
13109 #endif
13110 #ifdef TARGET_NR_clock_getres_time64
13111     case TARGET_NR_clock_getres_time64:
13112     {
13113         struct timespec ts;
13114         ret = get_errno(clock_getres(arg1, &ts));
13115         if (!is_error(ret)) {
13116             host_to_target_timespec64(arg2, &ts);
13117         }
13118         return ret;
13119     }
13120 #endif
13121 #ifdef TARGET_NR_clock_nanosleep
13122     case TARGET_NR_clock_nanosleep:
13123     {
13124         struct timespec ts;
13125         if (target_to_host_timespec(&ts, arg3)) {
13126             return -TARGET_EFAULT;
13127         }
13128         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
13129                                              &ts, arg4 ? &ts : NULL));
13130         /*
13131          * If the call is interrupted by a signal handler, it fails with
13132          * -TARGET_EINTR.  If arg4 is not NULL and arg2 is not TIMER_ABSTIME,
13133          * the remaining unslept time is written back to arg4.
13134          */
13135         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
13136             host_to_target_timespec(arg4, &ts)) {
13137               return -TARGET_EFAULT;
13138         }
13139 
13140         return ret;
13141     }
13142 #endif
13143 #ifdef TARGET_NR_clock_nanosleep_time64
13144     case TARGET_NR_clock_nanosleep_time64:
13145     {
13146         struct timespec ts;
13147 
13148         if (target_to_host_timespec64(&ts, arg3)) {
13149             return -TARGET_EFAULT;
13150         }
13151 
13152         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
13153                                              &ts, arg4 ? &ts : NULL));
13154 
13155         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
13156             host_to_target_timespec64(arg4, &ts)) {
13157             return -TARGET_EFAULT;
13158         }
13159         return ret;
13160     }
13161 #endif
13162 
13163 #if defined(TARGET_NR_set_tid_address)
13164     case TARGET_NR_set_tid_address:
13165     {
13166         TaskState *ts = get_task_state(cpu);
13167         ts->child_tidptr = arg1;
13168         /* Do not call the host set_tid_address() syscall; just return the tid. */
13169         return get_errno(sys_gettid());
13170     }
13171 #endif
13172 
13173     case TARGET_NR_tkill:
13174         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
13175 
13176     case TARGET_NR_tgkill:
13177         return get_errno(safe_tgkill((int)arg1, (int)arg2,
13178                          target_to_host_signal(arg3)));
13179 
13180 #ifdef TARGET_NR_set_robust_list
13181     case TARGET_NR_set_robust_list:
13182     case TARGET_NR_get_robust_list:
13183         /* The ABI for supporting robust futexes has userspace pass
13184          * the kernel a pointer to a linked list which is updated by
13185          * userspace after the syscall; the list is walked by the kernel
13186          * when the thread exits. Since the linked list in QEMU guest
13187          * memory isn't a valid linked list for the host and we have
13188          * no way to reliably intercept the thread-death event, we can't
13189          * support these. Silently return ENOSYS so that guest userspace
13190          * falls back to a non-robust futex implementation (which should
13191          * be OK except in the corner case of the guest crashing while
13192          * holding a mutex that is shared with another process via
13193          * shared memory).
13194          */
13195         return -TARGET_ENOSYS;
13196 #endif
13197 
13198 #if defined(TARGET_NR_utimensat)
13199     case TARGET_NR_utimensat:
13200         {
13201             struct timespec *tsp, ts[2];
13202             if (!arg3) {
13203                 tsp = NULL;
13204             } else {
13205                 if (target_to_host_timespec(ts, arg3)) {
13206                     return -TARGET_EFAULT;
13207                 }
13208                 if (target_to_host_timespec(ts + 1, arg3 +
13209                                             sizeof(struct target_timespec))) {
13210                     return -TARGET_EFAULT;
13211                 }
13212                 tsp = ts;
13213             }
13214             if (!arg2) {
13215                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
13216             } else {
13217                 if (!(p = lock_user_string(arg2))) {
13218                     return -TARGET_EFAULT;
13219                 }
13220                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
13221                 unlock_user(p, arg2, 0);
13222             }
13223         }
13224         return ret;
13225 #endif
13226 #ifdef TARGET_NR_utimensat_time64
13227     case TARGET_NR_utimensat_time64:
13228         {
13229             struct timespec *tsp, ts[2];
13230             if (!arg3) {
13231                 tsp = NULL;
13232             } else {
13233                 if (target_to_host_timespec64(ts, arg3)) {
13234                     return -TARGET_EFAULT;
13235                 }
13236                 if (target_to_host_timespec64(ts + 1, arg3 +
13237                                      sizeof(struct target__kernel_timespec))) {
13238                     return -TARGET_EFAULT;
13239                 }
13240                 tsp = ts;
13241             }
13242             if (!arg2) {
13243                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
13244             } else {
13245                 p = lock_user_string(arg2);
13246                 if (!p) {
13247                     return -TARGET_EFAULT;
13248                 }
13249                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
13250                 unlock_user(p, arg2, 0);
13251             }
13252         }
13253         return ret;
13254 #endif
13255 #ifdef TARGET_NR_futex
13256     case TARGET_NR_futex:
13257         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
13258 #endif
13259 #ifdef TARGET_NR_futex_time64
13260     case TARGET_NR_futex_time64:
13261         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
13262 #endif
13263 #ifdef CONFIG_INOTIFY
13264 #if defined(TARGET_NR_inotify_init)
13265     case TARGET_NR_inotify_init:
13266         ret = get_errno(inotify_init());
13267         if (ret >= 0) {
13268             fd_trans_register(ret, &target_inotify_trans);
13269         }
13270         return ret;
13271 #endif
13272 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
13273     case TARGET_NR_inotify_init1:
13274         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
13275                                           fcntl_flags_tbl)));
13276         if (ret >= 0) {
13277             fd_trans_register(ret, &target_inotify_trans);
13278         }
13279         return ret;
13280 #endif
13281 #if defined(TARGET_NR_inotify_add_watch)
13282     case TARGET_NR_inotify_add_watch:
13283         p = lock_user_string(arg2);
13284         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
13285         unlock_user(p, arg2, 0);
13286         return ret;
13287 #endif
13288 #if defined(TARGET_NR_inotify_rm_watch)
13289     case TARGET_NR_inotify_rm_watch:
13290         return get_errno(inotify_rm_watch(arg1, arg2));
13291 #endif
13292 #endif
13293 
13294 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13295     case TARGET_NR_mq_open:
13296         {
13297             struct mq_attr posix_mq_attr;
13298             struct mq_attr *pposix_mq_attr;
13299             int host_flags;
13300 
13301             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13302             pposix_mq_attr = NULL;
13303             if (arg4) {
13304                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13305                     return -TARGET_EFAULT;
13306                 }
13307                 pposix_mq_attr = &posix_mq_attr;
13308             }
13309             p = lock_user_string(arg1 - 1);
13310             if (!p) {
13311                 return -TARGET_EFAULT;
13312             }
13313             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13314             unlock_user(p, arg1, 0);
13315         }
13316         return ret;
13317 
13318     case TARGET_NR_mq_unlink:
13319         p = lock_user_string(arg1 - 1);
13320         if (!p) {
13321             return -TARGET_EFAULT;
13322         }
13323         ret = get_errno(mq_unlink(p));
13324         unlock_user(p, arg1, 0);
13325         return ret;
13326 
13327 #ifdef TARGET_NR_mq_timedsend
13328     case TARGET_NR_mq_timedsend:
13329         {
13330             struct timespec ts;
13331 
13332             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13333             if (arg5 != 0) {
13334                 if (target_to_host_timespec(&ts, arg5)) {
13335                     return -TARGET_EFAULT;
13336                 }
13337                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13338                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13339                     return -TARGET_EFAULT;
13340                 }
13341             } else {
13342                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13343             }
13344             unlock_user(p, arg2, arg3);
13345         }
13346         return ret;
13347 #endif
13348 #ifdef TARGET_NR_mq_timedsend_time64
13349     case TARGET_NR_mq_timedsend_time64:
13350         {
13351             struct timespec ts;
13352 
13353             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13354             if (arg5 != 0) {
13355                 if (target_to_host_timespec64(&ts, arg5)) {
13356                     return -TARGET_EFAULT;
13357                 }
13358                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13359                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13360                     return -TARGET_EFAULT;
13361                 }
13362             } else {
13363                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13364             }
13365             unlock_user(p, arg2, arg3);
13366         }
13367         return ret;
13368 #endif
13369 
13370 #ifdef TARGET_NR_mq_timedreceive
13371     case TARGET_NR_mq_timedreceive:
13372         {
13373             struct timespec ts;
13374             unsigned int prio;
13375 
13376             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13377             if (arg5 != 0) {
13378                 if (target_to_host_timespec(&ts, arg5)) {
13379                     return -TARGET_EFAULT;
13380                 }
13381                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13382                                                      &prio, &ts));
13383                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13384                     return -TARGET_EFAULT;
13385                 }
13386             } else {
13387                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13388                                                      &prio, NULL));
13389             }
13390             unlock_user(p, arg2, arg3);
13391             if (arg4 != 0)
13392                 put_user_u32(prio, arg4);
13393         }
13394         return ret;
13395 #endif
13396 #ifdef TARGET_NR_mq_timedreceive_time64
13397     case TARGET_NR_mq_timedreceive_time64:
13398         {
13399             struct timespec ts;
13400             unsigned int prio;
13401 
13402             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13403             if (arg5 != 0) {
13404                 if (target_to_host_timespec64(&ts, arg5)) {
13405                     return -TARGET_EFAULT;
13406                 }
13407                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13408                                                      &prio, &ts));
13409                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13410                     return -TARGET_EFAULT;
13411                 }
13412             } else {
13413                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13414                                                      &prio, NULL));
13415             }
13416             unlock_user(p, arg2, arg3);
13417             if (arg4 != 0) {
13418                 put_user_u32(prio, arg4);
13419             }
13420         }
13421         return ret;
13422 #endif
13423 
13424     /* Not implemented for now... */
13425 /*     case TARGET_NR_mq_notify: */
13426 /*         break; */
13427 
13428     case TARGET_NR_mq_getsetattr:
13429         {
13430             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13431             ret = 0;
13432             if (arg2 != 0) {
13433                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13434                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13435                                            &posix_mq_attr_out));
13436             } else if (arg3 != 0) {
13437                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13438             }
13439             if (ret == 0 && arg3 != 0) {
13440                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13441             }
13442         }
13443         return ret;
13444 #endif
13445 
13446 #ifdef CONFIG_SPLICE
13447 #ifdef TARGET_NR_tee
13448     case TARGET_NR_tee:
13449         {
13450             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13451         }
13452         return ret;
13453 #endif
13454 #ifdef TARGET_NR_splice
13455     case TARGET_NR_splice:
13456         {
13457             loff_t loff_in, loff_out;
13458             loff_t *ploff_in = NULL, *ploff_out = NULL;
13459             if (arg2) {
13460                 if (get_user_u64(loff_in, arg2)) {
13461                     return -TARGET_EFAULT;
13462                 }
13463                 ploff_in = &loff_in;
13464             }
13465             if (arg4) {
13466                 if (get_user_u64(loff_out, arg4)) {
13467                     return -TARGET_EFAULT;
13468                 }
13469                 ploff_out = &loff_out;
13470             }
13471             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13472             if (arg2) {
13473                 if (put_user_u64(loff_in, arg2)) {
13474                     return -TARGET_EFAULT;
13475                 }
13476             }
13477             if (arg4) {
13478                 if (put_user_u64(loff_out, arg4)) {
13479                     return -TARGET_EFAULT;
13480                 }
13481             }
13482         }
13483         return ret;
13484 #endif
13485 #ifdef TARGET_NR_vmsplice
13486     case TARGET_NR_vmsplice:
13487         {
13488             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13489             if (vec != NULL) {
13490                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13491                 unlock_iovec(vec, arg2, arg3, 0);
13492             } else {
13493                 ret = -host_to_target_errno(errno);
13494             }
13495         }
13496         return ret;
13497 #endif
13498 #endif /* CONFIG_SPLICE */
13499 #ifdef CONFIG_EVENTFD
13500 #if defined(TARGET_NR_eventfd)
13501     case TARGET_NR_eventfd:
13502         ret = get_errno(eventfd(arg1, 0));
13503         if (ret >= 0) {
13504             fd_trans_register(ret, &target_eventfd_trans);
13505         }
13506         return ret;
13507 #endif
13508 #if defined(TARGET_NR_eventfd2)
13509     case TARGET_NR_eventfd2:
13510     {
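        /*
         * TARGET_O_NONBLOCK and TARGET_O_CLOEXEC need not match the host's
         * numeric values, so translate the two supported flags explicitly.
         */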
13511         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13512         if (arg2 & TARGET_O_NONBLOCK) {
13513             host_flags |= O_NONBLOCK;
13514         }
13515         if (arg2 & TARGET_O_CLOEXEC) {
13516             host_flags |= O_CLOEXEC;
13517         }
13518         ret = get_errno(eventfd(arg1, host_flags));
13519         if (ret >= 0) {
13520             fd_trans_register(ret, &target_eventfd_trans);
13521         }
13522         return ret;
13523     }
13524 #endif
13525 #endif /* CONFIG_EVENTFD  */
13526 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13527     case TARGET_NR_fallocate:
13528 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
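        /* The 64-bit offset and len arrive as register pairs here. */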
13529         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13530                                   target_offset64(arg5, arg6)));
13531 #else
13532         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13533 #endif
13534         return ret;
13535 #endif
13536 #if defined(CONFIG_SYNC_FILE_RANGE)
13537 #if defined(TARGET_NR_sync_file_range)
13538     case TARGET_NR_sync_file_range:
13539 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13540 #if defined(TARGET_MIPS)
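        /*
         * MIPS passes the 64-bit offset and nbytes pairs starting at arg3,
         * with the flags in arg7.
         */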
13541         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13542                                         target_offset64(arg5, arg6), arg7));
13543 #else
13544         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13545                                         target_offset64(arg4, arg5), arg6));
13546 #endif /* !TARGET_MIPS */
13547 #else
13548         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13549 #endif
13550         return ret;
13551 #endif
13552 #if defined(TARGET_NR_sync_file_range2) || \
13553     defined(TARGET_NR_arm_sync_file_range)
13554 #if defined(TARGET_NR_sync_file_range2)
13555     case TARGET_NR_sync_file_range2:
13556 #endif
13557 #if defined(TARGET_NR_arm_sync_file_range)
13558     case TARGET_NR_arm_sync_file_range:
13559 #endif
13560         /* This is like sync_file_range but the arguments are reordered */
13561 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13562         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13563                                         target_offset64(arg5, arg6), arg2));
13564 #else
13565         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13566 #endif
13567         return ret;
13568 #endif
13569 #endif
13570 #if defined(TARGET_NR_signalfd4)
13571     case TARGET_NR_signalfd4:
13572         return do_signalfd4(arg1, arg2, arg4);
13573 #endif
13574 #if defined(TARGET_NR_signalfd)
13575     case TARGET_NR_signalfd:
13576         return do_signalfd4(arg1, arg2, 0);
13577 #endif
13578 #if defined(CONFIG_EPOLL)
13579 #if defined(TARGET_NR_epoll_create)
13580     case TARGET_NR_epoll_create:
13581         return get_errno(epoll_create(arg1));
13582 #endif
13583 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13584     case TARGET_NR_epoll_create1:
13585         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13586 #endif
13587 #if defined(TARGET_NR_epoll_ctl)
13588     case TARGET_NR_epoll_ctl:
13589     {
13590         struct epoll_event ep;
13591         struct epoll_event *epp = NULL;
13592         if (arg4) {
13593             if (arg2 != EPOLL_CTL_DEL) {
13594                 struct target_epoll_event *target_ep;
13595                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13596                     return -TARGET_EFAULT;
13597                 }
13598                 ep.events = tswap32(target_ep->events);
13599                 /*
13600                  * The epoll_data_t union is just opaque data to the kernel,
13601                  * so we transfer all 64 bits across and need not worry what
13602                  * actual data type it is.
13603                  */
13604                 ep.data.u64 = tswap64(target_ep->data.u64);
13605                 unlock_user_struct(target_ep, arg4, 0);
13606             }
13607             /*
13608              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13609              * non-null event pointer even though its contents are ignored,
13610              * so pass &ep whenever the guest supplied a pointer.
13611              */
13612             epp = &ep;
13613         }
13614         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13615     }
13616 #endif
13617 
13618 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13619 #if defined(TARGET_NR_epoll_wait)
13620     case TARGET_NR_epoll_wait:
13621 #endif
13622 #if defined(TARGET_NR_epoll_pwait)
13623     case TARGET_NR_epoll_pwait:
13624 #endif
13625     {
13626         struct target_epoll_event *target_ep;
13627         struct epoll_event *ep;
13628         int epfd = arg1;
13629         int maxevents = arg3;
13630         int timeout = arg4;
13631 
13632         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13633             return -TARGET_EINVAL;
13634         }
13635 
13636         target_ep = lock_user(VERIFY_WRITE, arg2,
13637                               maxevents * sizeof(struct target_epoll_event), 1);
13638         if (!target_ep) {
13639             return -TARGET_EFAULT;
13640         }
13641 
13642         ep = g_try_new(struct epoll_event, maxevents);
13643         if (!ep) {
13644             unlock_user(target_ep, arg2, 0);
13645             return -TARGET_ENOMEM;
13646         }
13647 
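              /*
               * Both guest syscalls are implemented with safe_epoll_pwait():
               * epoll_wait passes a NULL signal mask, while epoll_pwait hands
               * the converted guest mask to the host kernel, which applies it
               * for the duration of the wait.
               */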
13648         switch (num) {
13649 #if defined(TARGET_NR_epoll_pwait)
13650         case TARGET_NR_epoll_pwait:
13651         {
13652             sigset_t *set = NULL;
13653 
13654             if (arg5) {
13655                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13656                 if (ret != 0) {
13657                     break;
13658                 }
13659             }
13660 
13661             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13662                                              set, SIGSET_T_SIZE));
13663 
13664             if (set) {
13665                 finish_sigsuspend_mask(ret);
13666             }
13667             break;
13668         }
13669 #endif
13670 #if defined(TARGET_NR_epoll_wait)
13671         case TARGET_NR_epoll_wait:
13672             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13673                                              NULL, 0));
13674             break;
13675 #endif
13676         default:
13677             ret = -TARGET_ENOSYS;
13678         }
13679         if (!is_error(ret)) {
13680             int i;
13681             for (i = 0; i < ret; i++) {
13682                 target_ep[i].events = tswap32(ep[i].events);
13683                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13684             }
13685             unlock_user(target_ep, arg2,
13686                         ret * sizeof(struct target_epoll_event));
13687         } else {
13688             unlock_user(target_ep, arg2, 0);
13689         }
13690         g_free(ep);
13691         return ret;
13692     }
13693 #endif
13694 #endif
13695 #ifdef TARGET_NR_prlimit64
13696     case TARGET_NR_prlimit64:
13697     {
13698         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13699         struct target_rlimit64 *target_rnew, *target_rold;
13700         struct host_rlimit64 rnew, rold, *rnewp = 0;
13701         int resource = target_to_host_resource(arg2);
13702 
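              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
               * not forwarded to the host, since they would also constrain
               * QEMU's own memory use; the old limits are still read back
               * below when requested.
               */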
13703         if (arg3 && (resource != RLIMIT_AS &&
13704                      resource != RLIMIT_DATA &&
13705                      resource != RLIMIT_STACK)) {
13706             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13707                 return -TARGET_EFAULT;
13708             }
13709             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13710             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13711             unlock_user_struct(target_rnew, arg3, 0);
13712             rnewp = &rnew;
13713         }
13714 
13715         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13716         if (!is_error(ret) && arg4) {
13717             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13718                 return -TARGET_EFAULT;
13719             }
13720             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13721             __put_user(rold.rlim_max, &target_rold->rlim_max);
13722             unlock_user_struct(target_rold, arg4, 1);
13723         }
13724         return ret;
13725     }
13726 #endif
13727 #ifdef TARGET_NR_gethostname
13728     case TARGET_NR_gethostname:
13729     {
13730         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13731         if (name) {
13732             ret = get_errno(gethostname(name, arg2));
13733             unlock_user(name, arg1, arg2);
13734         } else {
13735             ret = -TARGET_EFAULT;
13736         }
13737         return ret;
13738     }
13739 #endif
13740 #ifdef TARGET_NR_atomic_cmpxchg_32
13741     case TARGET_NR_atomic_cmpxchg_32:
13742     {
13743         /* should use start_exclusive from main.c */
13744         abi_ulong mem_value;
13745         if (get_user_u32(mem_value, arg6)) {
13746             target_siginfo_t info;
13747             info.si_signo = SIGSEGV;
13748             info.si_errno = 0;
13749             info.si_code = TARGET_SEGV_MAPERR;
13750             info._sifields._sigfault._addr = arg6;
13751             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13752             ret = 0xdeadbeef;
13753             /* The read faulted; mem_value is not valid, so bail out.  */
13754             return 0xdeadbeef;
13755         }
13756             put_user_u32(arg1, arg6);
13757         return mem_value;
13758     }
13759 #endif
13760 #ifdef TARGET_NR_atomic_barrier
13761     case TARGET_NR_atomic_barrier:
13762         /* Like the kernel implementation and the QEMU Arm barrier,
13763            this is a no-op. */
13764         return 0;
13765 #endif
13766 
13767 #ifdef TARGET_NR_timer_create
13768     case TARGET_NR_timer_create:
13769     {
13770         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13771 
13772         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13773 
13774         int clkid = arg1;
13775         int timer_index = next_free_host_timer();
13776 
13777         if (timer_index < 0) {
13778             ret = -TARGET_EAGAIN;
13779         } else {
13780             timer_t *phtimer = g_posix_timers + timer_index;
13781 
13782             if (arg2) {
13783                 phost_sevp = &host_sevp;
13784                 ret = target_to_host_sigevent(phost_sevp, arg2);
13785                 if (ret != 0) {
13786                     free_host_timer_slot(timer_index);
13787                     return ret;
13788                 }
13789             }
13790 
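                  /*
                   * On success the guest sees TIMER_MAGIC | timer_index, not
                   * the host timer_t; get_timer_id() validates and strips the
                   * magic again on subsequent timer_* syscalls.
                   */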
13791             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13792             if (ret) {
13793                 free_host_timer_slot(timer_index);
13794             } else {
13795                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13796                     timer_delete(*phtimer);
13797                     free_host_timer_slot(timer_index);
13798                     return -TARGET_EFAULT;
13799                 }
13800             }
13801         }
13802         return ret;
13803     }
13804 #endif
13805 
13806 #ifdef TARGET_NR_timer_settime
13807     case TARGET_NR_timer_settime:
13808     {
13809         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13810          * struct itimerspec *old_value */
13811         target_timer_t timerid = get_timer_id(arg1);
13812 
13813         if (timerid < 0) {
13814             ret = timerid;
13815         } else if (arg3 == 0) {
13816             ret = -TARGET_EINVAL;
13817         } else {
13818             timer_t htimer = g_posix_timers[timerid];
13819             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13820 
13821             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13822                 return -TARGET_EFAULT;
13823             }
13824             ret = get_errno(
13825                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13826             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13827                 return -TARGET_EFAULT;
13828             }
13829         }
13830         return ret;
13831     }
13832 #endif
13833 
13834 #ifdef TARGET_NR_timer_settime64
13835     case TARGET_NR_timer_settime64:
13836     {
13837         target_timer_t timerid = get_timer_id(arg1);
13838 
13839         if (timerid < 0) {
13840             ret = timerid;
13841         } else if (arg3 == 0) {
13842             ret = -TARGET_EINVAL;
13843         } else {
13844             timer_t htimer = g_posix_timers[timerid];
13845             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13846 
13847             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13848                 return -TARGET_EFAULT;
13849             }
13850             ret = get_errno(
13851                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13852             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13853                 return -TARGET_EFAULT;
13854             }
13855         }
13856         return ret;
13857     }
13858 #endif
13859 
13860 #ifdef TARGET_NR_timer_gettime
13861     case TARGET_NR_timer_gettime:
13862     {
13863         /* args: timer_t timerid, struct itimerspec *curr_value */
13864         target_timer_t timerid = get_timer_id(arg1);
13865 
13866         if (timerid < 0) {
13867             ret = timerid;
13868         } else if (!arg2) {
13869             ret = -TARGET_EFAULT;
13870         } else {
13871             timer_t htimer = g_posix_timers[timerid];
13872             struct itimerspec hspec;
13873             ret = get_errno(timer_gettime(htimer, &hspec));
13874 
13875             if (host_to_target_itimerspec(arg2, &hspec)) {
13876                 ret = -TARGET_EFAULT;
13877             }
13878         }
13879         return ret;
13880     }
13881 #endif
13882 
13883 #ifdef TARGET_NR_timer_gettime64
13884     case TARGET_NR_timer_gettime64:
13885     {
13886         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13887         target_timer_t timerid = get_timer_id(arg1);
13888 
13889         if (timerid < 0) {
13890             ret = timerid;
13891         } else if (!arg2) {
13892             ret = -TARGET_EFAULT;
13893         } else {
13894             timer_t htimer = g_posix_timers[timerid];
13895             struct itimerspec hspec;
13896             ret = get_errno(timer_gettime(htimer, &hspec));
13897 
13898             if (host_to_target_itimerspec64(arg2, &hspec)) {
13899                 ret = -TARGET_EFAULT;
13900             }
13901         }
13902         return ret;
13903     }
13904 #endif
13905 
13906 #ifdef TARGET_NR_timer_getoverrun
13907     case TARGET_NR_timer_getoverrun:
13908     {
13909         /* args: timer_t timerid */
13910         target_timer_t timerid = get_timer_id(arg1);
13911 
13912         if (timerid < 0) {
13913             ret = timerid;
13914         } else {
13915             timer_t htimer = g_posix_timers[timerid];
13916             ret = get_errno(timer_getoverrun(htimer));
13917         }
13918         return ret;
13919     }
13920 #endif
13921 
13922 #ifdef TARGET_NR_timer_delete
13923     case TARGET_NR_timer_delete:
13924     {
13925         /* args: timer_t timerid */
13926         target_timer_t timerid = get_timer_id(arg1);
13927 
13928         if (timerid < 0) {
13929             ret = timerid;
13930         } else {
13931             timer_t htimer = g_posix_timers[timerid];
13932             ret = get_errno(timer_delete(htimer));
13933             free_host_timer_slot(timerid);
13934         }
13935         return ret;
13936     }
13937 #endif
13938 
13939 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13940     case TARGET_NR_timerfd_create:
13941         ret = get_errno(timerfd_create(arg1,
13942                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13943         if (ret >= 0) {
13944             fd_trans_register(ret, &target_timerfd_trans);
13945         }
13946         return ret;
13947 #endif
13948 
13949 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13950     case TARGET_NR_timerfd_gettime:
13951         {
13952             struct itimerspec its_curr;
13953 
13954             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13955 
13956             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13957                 return -TARGET_EFAULT;
13958             }
13959         }
13960         return ret;
13961 #endif
13962 
13963 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13964     case TARGET_NR_timerfd_gettime64:
13965         {
13966             struct itimerspec its_curr;
13967 
13968             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13969 
13970             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13971                 return -TARGET_EFAULT;
13972             }
13973         }
13974         return ret;
13975 #endif
13976 
13977 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13978     case TARGET_NR_timerfd_settime:
13979         {
13980             struct itimerspec its_new, its_old, *p_new;
13981 
13982             if (arg3) {
13983                 if (target_to_host_itimerspec(&its_new, arg3)) {
13984                     return -TARGET_EFAULT;
13985                 }
13986                 p_new = &its_new;
13987             } else {
13988                 p_new = NULL;
13989             }
13990 
13991             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13992 
13993             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13994                 return -TARGET_EFAULT;
13995             }
13996         }
13997         return ret;
13998 #endif
13999 
14000 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
14001     case TARGET_NR_timerfd_settime64:
14002         {
14003             struct itimerspec its_new, its_old, *p_new;
14004 
14005             if (arg3) {
14006                 if (target_to_host_itimerspec64(&its_new, arg3)) {
14007                     return -TARGET_EFAULT;
14008                 }
14009                 p_new = &its_new;
14010             } else {
14011                 p_new = NULL;
14012             }
14013 
14014             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
14015 
14016             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
14017                 return -TARGET_EFAULT;
14018             }
14019         }
14020         return ret;
14021 #endif
14022 
14023 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
14024     case TARGET_NR_ioprio_get:
14025         return get_errno(ioprio_get(arg1, arg2));
14026 #endif
14027 
14028 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
14029     case TARGET_NR_ioprio_set:
14030         return get_errno(ioprio_set(arg1, arg2, arg3));
14031 #endif
14032 
14033 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
14034     case TARGET_NR_setns:
14035         return get_errno(setns(arg1, arg2));
14036 #endif
14037 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
14038     case TARGET_NR_unshare:
14039         return get_errno(unshare(arg1));
14040 #endif
14041 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
14042     case TARGET_NR_kcmp:
14043         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
14044 #endif
14045 #ifdef TARGET_NR_swapcontext
14046     case TARGET_NR_swapcontext:
14047         /* PowerPC specific.  */
14048         return do_swapcontext(cpu_env, arg1, arg2, arg3);
14049 #endif
14050 #ifdef TARGET_NR_memfd_create
14051     case TARGET_NR_memfd_create:
14052         p = lock_user_string(arg1);
14053         if (!p) {
14054             return -TARGET_EFAULT;
14055         }
14056         ret = get_errno(memfd_create(p, arg2));
14057         fd_trans_unregister(ret);
14058         unlock_user(p, arg1, 0);
14059         return ret;
14060 #endif
14061 #if defined TARGET_NR_membarrier && defined __NR_membarrier
14062     case TARGET_NR_membarrier:
14063         return get_errno(membarrier(arg1, arg2));
14064 #endif
14065 
14066 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
14067     case TARGET_NR_copy_file_range:
14068         {
14069             loff_t inoff, outoff;
14070             loff_t *pinoff = NULL, *poutoff = NULL;
14071 
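                  /*
                   * When the guest passes off_in/off_out pointers, the 64-bit
                   * offsets are read here and written back only if the host
                   * call reports that some bytes were actually copied.
                   */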
14072             if (arg2) {
14073                 if (get_user_u64(inoff, arg2)) {
14074                     return -TARGET_EFAULT;
14075                 }
14076                 pinoff = &inoff;
14077             }
14078             if (arg4) {
14079                 if (get_user_u64(outoff, arg4)) {
14080                     return -TARGET_EFAULT;
14081                 }
14082                 poutoff = &outoff;
14083             }
14084             /* Do not sign-extend the count parameter. */
14085             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
14086                                                  (abi_ulong)arg5, arg6));
14087             if (!is_error(ret) && ret > 0) {
14088                 if (arg2) {
14089                     if (put_user_u64(inoff, arg2)) {
14090                         return -TARGET_EFAULT;
14091                     }
14092                 }
14093                 if (arg4) {
14094                     if (put_user_u64(outoff, arg4)) {
14095                         return -TARGET_EFAULT;
14096                     }
14097                 }
14098             }
14099         }
14100         return ret;
14101 #endif
14102 
14103 #if defined(TARGET_NR_pivot_root)
14104     case TARGET_NR_pivot_root:
14105         {
14106             void *p2;
14107             p = lock_user_string(arg1); /* new_root */
14108             p2 = lock_user_string(arg2); /* put_old */
14109             if (!p || !p2) {
14110                 ret = -TARGET_EFAULT;
14111             } else {
14112                 ret = get_errno(pivot_root(p, p2));
14113             }
14114             unlock_user(p2, arg2, 0);
14115             unlock_user(p, arg1, 0);
14116         }
14117         return ret;
14118 #endif
14119 
14120 #if defined(TARGET_NR_riscv_hwprobe)
14121     case TARGET_NR_riscv_hwprobe:
14122         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
14123 #endif
14124 
14125 #ifdef TARGET_AARCH64
14126     case TARGET_NR_map_shadow_stack:
14127         return do_map_shadow_stack(cpu_env, arg1, arg2, arg3);
14128 #endif
14129 
14130     default:
14131         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
14132         return -TARGET_ENOSYS;
14133     }
14134     return ret;
14135 }
14136 
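      /*
       * Syscall user dispatch (PR_SET_SYSCALL_USER_DISPATCH): a syscall made
       * from outside the [sys_dispatch, sys_dispatch + sys_dispatch_len)
       * window is turned into SIGSYS unless the guest's selector byte says
       * SYSCALL_DISPATCH_FILTER_ALLOW.  vDSO sigreturn is always exempt.
       * Returns true if a signal was raised and the syscall must be skipped.
       */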
14137 static bool sys_dispatch(CPUState *cpu, TaskState *ts)
14138 {
14139     abi_ptr pc;
14140 
14141     if (likely(ts->sys_dispatch_len == -1)) {
14142         return false;
14143     }
14144 
14145     pc = cpu->cc->get_pc(cpu);
14146     if (likely(pc - ts->sys_dispatch < ts->sys_dispatch_len)) {
14147         return false;
14148     }
14149     if (unlikely(is_vdso_sigreturn(pc))) {
14150         return false;
14151     }
14152     if (likely(ts->sys_dispatch_selector)) {
14153         uint8_t sb;
14154         if (get_user_u8(sb, ts->sys_dispatch_selector)) {
14155             force_sig(TARGET_SIGSEGV);
14156             return true;
14157         }
14158         if (likely(sb == SYSCALL_DISPATCH_FILTER_ALLOW)) {
14159             return false;
14160         }
14161         if (unlikely(sb != SYSCALL_DISPATCH_FILTER_BLOCK)) {
14162             force_sig(TARGET_SIGSYS);
14163             return true;
14164         }
14165     }
14166     force_sig_fault(TARGET_SIGSYS, TARGET_SYS_USER_DISPATCH, pc);
14167     return true;
14168 }
14169 
14170 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
14171                     abi_long arg2, abi_long arg3, abi_long arg4,
14172                     abi_long arg5, abi_long arg6, abi_long arg7,
14173                     abi_long arg8)
14174 {
14175     CPUState *cpu = env_cpu(cpu_env);
14176     TaskState *ts = get_task_state(cpu);
14177     abi_long ret;
14178 
14179 #ifdef DEBUG_ERESTARTSYS
14180     /* Debug-only code for exercising the syscall-restart code paths
14181      * in the per-architecture cpu main loops: restart every syscall
14182      * the guest makes once before letting it through.
14183      */
14184     {
14185         static bool flag;
14186         flag = !flag;
14187         if (flag) {
14188             return -QEMU_ERESTARTSYS;
14189         }
14190     }
14191 #endif
14192 
14193     if (sys_dispatch(cpu, ts)) {
14194         return -QEMU_ESIGRETURN;
14195     }
14196 
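          /*
           * Bracket the real dispatch with plugin and (optional) strace
           * instrumentation so both see the syscall arguments and result.
           */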
14197     record_syscall_start(cpu, num, arg1,
14198                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
14199 
14200     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
14201         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
14202     }
14203 
14204     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
14205                       arg5, arg6, arg7, arg8);
14206 
14207     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
14208         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
14209                           arg3, arg4, arg5, arg6);
14210     }
14211 
14212     record_syscall_return(cpu, num, ret);
14213     return ret;
14214 }
14215